Commit 9efb105 (verified) · Parent(s): bfc7d1d
Committed by lhoestq (HF Staff)

Add 'qnli' config data files
README.md CHANGED
@@ -155,6 +155,32 @@ dataset_info:
     num_examples: 1725
   download_size: 1028112
   dataset_size: 1492132
+- config_name: qnli
+  features:
+  - name: question
+    dtype: string
+  - name: sentence
+    dtype: string
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': entailment
+          '1': not_entailment
+  - name: idx
+    dtype: int32
+  splits:
+  - name: train
+    num_bytes: 25612443
+    num_examples: 104743
+  - name: validation
+    num_bytes: 1368304
+    num_examples: 5463
+  - name: test
+    num_bytes: 1373093
+    num_examples: 5463
+  download_size: 19190743
+  dataset_size: 28353840
 - config_name: qqp
   features:
   - name: question1
@@ -268,6 +294,14 @@ configs:
     path: mrpc/validation-*
   - split: test
     path: mrpc/test-*
+- config_name: qnli
+  data_files:
+  - split: train
+    path: qnli/train-*
+  - split: validation
+    path: qnli/validation-*
+  - split: test
+    path: qnli/test-*
 - config_name: qqp
   data_files:
   - split: train
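With the configs mapping above merged, the new qnli splits resolve directly from the Parquet files and can be loaded through the standard datasets API. A minimal sketch in Python, using a placeholder repository id (the repo this commit belongs to is not named in the diff):

from datasets import load_dataset

# Placeholder repo id -- substitute the actual dataset repository this commit was pushed to.
repo_id = "user/glue-ci"

# The 'qnli' config resolves its splits via the data_files patterns added above
# (qnli/train-*, qnli/validation-*, qnli/test-*).
ds = load_dataset(repo_id, "qnli")

print(ds)              # DatasetDict with train, validation and test splits
print(ds["train"][0])  # {'question': ..., 'sentence': ..., 'label': ..., 'idx': ...}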
dataset_infos.json CHANGED
@@ -473,39 +473,32 @@
   },
   "qnli": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
-    "citation": "@article{rajpurkar2016squad,\n title={Squad: 100,000+ questions for machine comprehension of text},\n author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},\n journal={arXiv preprint arXiv:1606.05250},\n year={2016}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
+    "citation": "@article{rajpurkar2016squad,\n title={Squad: 100,000+ questions for machine comprehension of text},\n author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},\n journal={arXiv preprint arXiv:1606.05250},\n year={2016}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
     "homepage": "https://rajpurkar.github.io/SQuAD-explorer/",
     "license": "",
     "features": {
       "question": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "sentence": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "label": {
-        "num_classes": 2,
         "names": [
           "entailment",
           "not_entailment"
         ],
-        "names_file": null,
-        "id": null,
         "_type": "ClassLabel"
       },
       "idx": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "builder_name": "glue",
+    "builder_name": "glue-ci",
+    "dataset_name": "glue-ci",
     "config_name": "qnli",
     "version": {
       "version_str": "1.0.0",
@@ -515,35 +508,28 @@
       "patch": 0
     },
     "splits": {
-      "test": {
-        "name": "test",
-        "num_bytes": 1376516,
-        "num_examples": 5463,
-        "dataset_name": "glue"
-      },
       "train": {
         "name": "train",
-        "num_bytes": 25677924,
+        "num_bytes": 25612443,
         "num_examples": 104743,
-        "dataset_name": "glue"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 1371727,
+        "num_bytes": 1368304,
         "num_examples": 5463,
-        "dataset_name": "glue"
-      }
-    },
-    "download_checksums": {
-      "https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip": {
-        "num_bytes": 10627589,
-        "checksum": "e634e78627a29adaecd4f955359b22bf5e70f2cbd93b493f2d624138a0c0e5f5"
+        "dataset_name": null
+      },
+      "test": {
+        "name": "test",
+        "num_bytes": 1373093,
+        "num_examples": 5463,
+        "dataset_name": null
       }
     },
-    "download_size": 10627589,
-    "post_processing_size": null,
-    "dataset_size": 28426167,
-    "size_in_bytes": 39053756
+    "download_size": 19190743,
+    "dataset_size": 28353840,
+    "size_in_bytes": 47544583
   },
   "rte": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
qnli/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6433b32f6a0bc104f023eab938d9a087f7776af8a90a82beb36d0f5ebde84411
+size 872949
qnli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31a832f5fe31b9944a643bcb47f8c34efc5e770573ccb41d5c81e5ed0d71c816
+size 17449932
qnli/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:868ef7832318910590393e9376088710be6f9f2eeb1d77f1e890b8e174c51a87
+size 867862
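The three files above are Git LFS pointers; the Parquet shards themselves sit in LFS storage. To inspect a shard without going through the datasets loader, one option is to fetch it with huggingface_hub and read it with pyarrow. A sketch under the same placeholder-repo-id assumption:

from huggingface_hub import hf_hub_download
import pyarrow.parquet as pq

# Download one shard from the dataset repo (repo id is a placeholder, not taken from this commit).
path = hf_hub_download(
    repo_id="user/glue-ci",
    filename="qnli/validation-00000-of-00001.parquet",
    repo_type="dataset",
)

table = pq.read_table(path)
print(table.num_rows)      # expected: 5463
print(table.column_names)  # ['question', 'sentence', 'label', 'idx']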