lhoestq (HF Staff) committed c613e45 · verified · 1 Parent(s): 9efb105

Add 'rte' config data files

README.md CHANGED
@@ -207,6 +207,32 @@ dataset_info:
     num_examples: 390965
   download_size: 73472088
   dataset_size: 111725685
+- config_name: rte
+  features:
+  - name: sentence1
+    dtype: string
+  - name: sentence2
+    dtype: string
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': entailment
+          '1': not_entailment
+  - name: idx
+    dtype: int32
+  splits:
+  - name: train
+    num_bytes: 847320
+    num_examples: 2490
+  - name: validation
+    num_bytes: 90728
+    num_examples: 277
+  - name: test
+    num_bytes: 974053
+    num_examples: 3000
+  download_size: 1267150
+  dataset_size: 1912101
 - config_name: sst2
   features:
   - name: sentence
@@ -310,6 +336,14 @@ configs:
     path: qqp/validation-*
   - split: test
     path: qqp/test-*
+- config_name: rte
+  data_files:
+  - split: train
+    path: rte/train-*
+  - split: validation
+    path: rte/validation-*
+  - split: test
+    path: rte/test-*
 - config_name: sst2
   data_files:
   - split: train
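
With both the dataset_info and configs entries above in place, the Hub resolves the new rte split files automatically. A minimal loading sketch in Python; the repository id is a placeholder, since the commit does not name the repository:

from datasets import load_dataset

# "user/glue-ci" is a placeholder repo id, not taken from this commit.
ds = load_dataset("user/glue-ci", "rte")

print(ds)              # DatasetDict with train (2490), validation (277), test (3000)
print(ds["train"][0])  # {'sentence1': ..., 'sentence2': ..., 'label': 0 or 1, 'idx': ...}
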
dataset_infos.json CHANGED
@@ -533,39 +533,32 @@
   },
   "rte": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
-    "citation": "@inproceedings{dagan2005pascal,\n title={The PASCAL recognising textual entailment challenge},\n author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},\n booktitle={Machine Learning Challenges Workshop},\n pages={177--190},\n year={2005},\n organization={Springer}\n}\n@inproceedings{bar2006second,\n title={The second pascal recognising textual entailment challenge},\n author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},\n booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},\n volume={6},\n number={1},\n pages={6--4},\n year={2006},\n organization={Venice}\n}\n@inproceedings{giampiccolo2007third,\n title={The third pascal recognizing textual entailment challenge},\n author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},\n booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},\n pages={1--9},\n year={2007},\n organization={Association for Computational Linguistics}\n}\n@inproceedings{bentivogli2009fifth,\n title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},\n author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},\n booktitle={TAC},\n year={2009}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
+    "citation": "@inproceedings{dagan2005pascal,\n title={The PASCAL recognising textual entailment challenge},\n author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},\n booktitle={Machine Learning Challenges Workshop},\n pages={177--190},\n year={2005},\n organization={Springer}\n}\n@inproceedings{bar2006second,\n title={The second pascal recognising textual entailment challenge},\n author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},\n booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},\n volume={6},\n number={1},\n pages={6--4},\n year={2006},\n organization={Venice}\n}\n@inproceedings{giampiccolo2007third,\n title={The third pascal recognizing textual entailment challenge},\n author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},\n booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},\n pages={1--9},\n year={2007},\n organization={Association for Computational Linguistics}\n}\n@inproceedings{bentivogli2009fifth,\n title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},\n author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},\n booktitle={TAC},\n year={2009}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
     "homepage": "https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
     "license": "",
     "features": {
       "sentence1": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "sentence2": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "label": {
-        "num_classes": 2,
         "names": [
           "entailment",
           "not_entailment"
         ],
-        "names_file": null,
-        "id": null,
         "_type": "ClassLabel"
       },
       "idx": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "builder_name": "glue",
+    "builder_name": "glue-ci",
+    "dataset_name": "glue-ci",
     "config_name": "rte",
     "version": {
       "version_str": "1.0.0",
@@ -575,35 +568,28 @@
       "patch": 0
     },
     "splits": {
-      "test": {
-        "name": "test",
-        "num_bytes": 975936,
-        "num_examples": 3000,
-        "dataset_name": "glue"
-      },
       "train": {
         "name": "train",
-        "num_bytes": 848888,
+        "num_bytes": 847320,
         "num_examples": 2490,
-        "dataset_name": "glue"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 90911,
+        "num_bytes": 90728,
         "num_examples": 277,
-        "dataset_name": "glue"
-      }
-    },
-    "download_checksums": {
-      "https://dl.fbaipublicfiles.com/glue/data/RTE.zip": {
-        "num_bytes": 697150,
-        "checksum": "6bf86de103ecd335f3441bd43574d23fef87ecc695977a63b82d5efb206556ee"
+        "dataset_name": null
+      },
+      "test": {
+        "name": "test",
+        "num_bytes": 974053,
+        "num_examples": 3000,
+        "dataset_name": null
       }
     },
-    "download_size": 697150,
-    "post_processing_size": null,
-    "dataset_size": 1915735,
-    "size_in_bytes": 2612885
+    "download_size": 1267150,
+    "dataset_size": 1912101,
+    "size_in_bytes": 3179251
   },
   "wnli": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
rte/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6cad83b691b7df6b73738a91111a0e196a294ab3988b3fb261a0bc7a455af0d
+size 618851
rte/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:adc4c0a0252a64a75b101f5e73bd5c1511864580763ddd4fb48c429e59f2dde2
+size 580730
rte/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c0487bc75ae68a5d7458807b57973c656ae9c47c64a114a8b01002226fddf4a
+size 67569
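
The three shards are stored as git-LFS pointers; the underlying Parquet files can also be read directly, without going through the datasets loader. A sketch, again with a placeholder repository id:

import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# "user/glue-ci" is a placeholder repo id; repo_type="dataset" is
# required when downloading from a dataset repository.
path = hf_hub_download(
    repo_id="user/glue-ci",
    filename="rte/validation-00000-of-00001.parquet",
    repo_type="dataset",
)

table = pq.read_table(path)
print(table.num_rows)      # 277, matching the validation split metadata
print(table.column_names)  # ['sentence1', 'sentence2', 'label', 'idx']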