Convert dataset to Parquet

#4
opened by lhoestq (HF Staff)
Files changed (43)
  1. README.md +396 -0
  2. dummy/mrpc/1.0.0/dummy_data.zip → ax/test-00000-of-00001.parquet +2 -2
  3. dummy/ax/1.0.0/dummy_data.zip → cola/test-00000-of-00001.parquet +2 -2
  4. cola/train-00000-of-00001.parquet +3 -0
  5. dummy/cola/1.0.0/dummy_data.zip → cola/validation-00000-of-00001.parquet +2 -2
  6. dataset_infos.json +0 -1
  7. dummy/qnli/1.0.0/dummy_data.zip +0 -3
  8. dummy/qqp/1.0.0/dummy_data.zip +0 -3
  9. dummy/rte/1.0.0/dummy_data.zip +0 -3
  10. dummy/sst2/1.0.0/dummy_data.zip +0 -3
  11. dummy/stsb/1.0.0/dummy_data.zip +0 -3
  12. dummy/wnli/1.0.0/dummy_data.zip +0 -3
  13. glue-ci.py +0 -628
  14. mnli/test_matched-00000-of-00001.parquet +3 -0
  15. mnli/test_mismatched-00000-of-00001.parquet +3 -0
  16. mnli/train-00000-of-00001.parquet +3 -0
  17. mnli/validation_matched-00000-of-00001.parquet +3 -0
  18. mnli/validation_mismatched-00000-of-00001.parquet +3 -0
  19. mnli_matched/test-00000-of-00001.parquet +3 -0
  20. mnli_matched/validation-00000-of-00001.parquet +3 -0
  21. mnli_mismatched/test-00000-of-00001.parquet +3 -0
  22. mnli_mismatched/validation-00000-of-00001.parquet +3 -0
  23. mrpc/test-00000-of-00001.parquet +3 -0
  24. mrpc/train-00000-of-00001.parquet +3 -0
  25. dummy/mnli/1.0.0/dummy_data.zip → mrpc/validation-00000-of-00001.parquet +2 -2
  26. qnli/test-00000-of-00001.parquet +3 -0
  27. qnli/train-00000-of-00001.parquet +3 -0
  28. qnli/validation-00000-of-00001.parquet +3 -0
  29. qqp/test-00000-of-00001.parquet +3 -0
  30. qqp/train-00000-of-00001.parquet +3 -0
  31. qqp/validation-00000-of-00001.parquet +3 -0
  32. rte/test-00000-of-00001.parquet +3 -0
  33. rte/train-00000-of-00001.parquet +3 -0
  34. rte/validation-00000-of-00001.parquet +3 -0
  35. sst2/test-00000-of-00001.parquet +3 -0
  36. sst2/train-00000-of-00001.parquet +3 -0
  37. sst2/validation-00000-of-00001.parquet +3 -0
  38. stsb/test-00000-of-00001.parquet +3 -0
  39. stsb/train-00000-of-00001.parquet +3 -0
  40. stsb/validation-00000-of-00001.parquet +3 -0
  41. wnli/test-00000-of-00001.parquet +3 -0
  42. wnli/train-00000-of-00001.parquet +3 -0
  43. wnli/validation-00000-of-00001.parquet +3 -0
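With this conversion, the data ships as plain Parquet files (one directory per config, one file per split), the metadata moves into the README.md YAML header, and the `glue-ci.py` loading script, `dataset_infos.json`, and dummy data are removed. Loading then goes straight through the Parquet files. A minimal sketch, with `<namespace>/<dataset-repo>` standing in as a placeholder for the repository this PR targets:

```python
from datasets import load_dataset

# Placeholder repo id; use the dataset repository this PR belongs to.
ds = load_dataset("<namespace>/<dataset-repo>", "cola")

print(ds)              # DatasetDict with train / validation / test splits
print(ds["train"][0])  # {'sentence': ..., 'label': 0 or 1, 'idx': ...}
```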
README.md CHANGED
@@ -23,6 +23,402 @@ task_ids:
23
  - text-scoring
24
  paperswithcode_id: glue
25
  pretty_name: GLUE (General Language Understanding Evaluation benchmark)
26
+ dataset_info:
27
+ - config_name: ax
28
+ features:
29
+ - name: premise
30
+ dtype: string
31
+ - name: hypothesis
32
+ dtype: string
33
+ - name: label
34
+ dtype:
35
+ class_label:
36
+ names:
37
+ '0': entailment
38
+ '1': neutral
39
+ '2': contradiction
40
+ - name: idx
41
+ dtype: int32
42
+ splits:
43
+ - name: test
44
+ num_bytes: 237694
45
+ num_examples: 1104
46
+ download_size: 79191
47
+ dataset_size: 237694
48
+ - config_name: cola
49
+ features:
50
+ - name: sentence
51
+ dtype: string
52
+ - name: label
53
+ dtype:
54
+ class_label:
55
+ names:
56
+ '0': unacceptable
57
+ '1': acceptable
58
+ - name: idx
59
+ dtype: int32
60
+ splits:
61
+ - name: train
62
+ num_bytes: 484869
63
+ num_examples: 8551
64
+ - name: validation
65
+ num_bytes: 60322
66
+ num_examples: 1043
67
+ - name: test
68
+ num_bytes: 60513
69
+ num_examples: 1063
70
+ download_size: 322394
71
+ dataset_size: 605704
72
+ - config_name: mnli
73
+ features:
74
+ - name: premise
75
+ dtype: string
76
+ - name: hypothesis
77
+ dtype: string
78
+ - name: label
79
+ dtype:
80
+ class_label:
81
+ names:
82
+ '0': entailment
83
+ '1': neutral
84
+ '2': contradiction
85
+ - name: idx
86
+ dtype: int32
87
+ splits:
88
+ - name: train
89
+ num_bytes: 74619646
90
+ num_examples: 392702
91
+ - name: validation_matched
92
+ num_bytes: 1833783
93
+ num_examples: 9815
94
+ - name: validation_mismatched
95
+ num_bytes: 1949231
96
+ num_examples: 9832
97
+ - name: test_matched
98
+ num_bytes: 1848654
99
+ num_examples: 9796
100
+ - name: test_mismatched
101
+ num_bytes: 1950703
102
+ num_examples: 9847
103
+ download_size: 56899587
104
+ dataset_size: 82202017
105
+ - config_name: mnli_matched
106
+ features:
107
+ - name: premise
108
+ dtype: string
109
+ - name: hypothesis
110
+ dtype: string
111
+ - name: label
112
+ dtype:
113
+ class_label:
114
+ names:
115
+ '0': entailment
116
+ '1': neutral
117
+ '2': contradiction
118
+ - name: idx
119
+ dtype: int32
120
+ splits:
121
+ - name: validation
122
+ num_bytes: 1833783
123
+ num_examples: 9815
124
+ - name: test
125
+ num_bytes: 1848654
126
+ num_examples: 9796
127
+ download_size: 2421421
128
+ dataset_size: 3682437
129
+ - config_name: mnli_mismatched
130
+ features:
131
+ - name: premise
132
+ dtype: string
133
+ - name: hypothesis
134
+ dtype: string
135
+ - name: label
136
+ dtype:
137
+ class_label:
138
+ names:
139
+ '0': entailment
140
+ '1': neutral
141
+ '2': contradiction
142
+ - name: idx
143
+ dtype: int32
144
+ splits:
145
+ - name: validation
146
+ num_bytes: 1949231
147
+ num_examples: 9832
148
+ - name: test
149
+ num_bytes: 1950703
150
+ num_examples: 9847
151
+ download_size: 2496567
152
+ dataset_size: 3899934
153
+ - config_name: mrpc
154
+ features:
155
+ - name: sentence1
156
+ dtype: string
157
+ - name: sentence2
158
+ dtype: string
159
+ - name: label
160
+ dtype:
161
+ class_label:
162
+ names:
163
+ '0': not_equivalent
164
+ '1': equivalent
165
+ - name: idx
166
+ dtype: int32
167
+ splits:
168
+ - name: train
169
+ num_bytes: 943843
170
+ num_examples: 3668
171
+ - name: validation
172
+ num_bytes: 105879
173
+ num_examples: 408
174
+ - name: test
175
+ num_bytes: 442410
176
+ num_examples: 1725
177
+ download_size: 1028112
178
+ dataset_size: 1492132
179
+ - config_name: qnli
180
+ features:
181
+ - name: question
182
+ dtype: string
183
+ - name: sentence
184
+ dtype: string
185
+ - name: label
186
+ dtype:
187
+ class_label:
188
+ names:
189
+ '0': entailment
190
+ '1': not_entailment
191
+ - name: idx
192
+ dtype: int32
193
+ splits:
194
+ - name: train
195
+ num_bytes: 25612443
196
+ num_examples: 104743
197
+ - name: validation
198
+ num_bytes: 1368304
199
+ num_examples: 5463
200
+ - name: test
201
+ num_bytes: 1373093
202
+ num_examples: 5463
203
+ download_size: 19190743
204
+ dataset_size: 28353840
205
+ - config_name: qqp
206
+ features:
207
+ - name: question1
208
+ dtype: string
209
+ - name: question2
210
+ dtype: string
211
+ - name: label
212
+ dtype:
213
+ class_label:
214
+ names:
215
+ '0': not_duplicate
216
+ '1': duplicate
217
+ - name: idx
218
+ dtype: int32
219
+ splits:
220
+ - name: train
221
+ num_bytes: 50900820
222
+ num_examples: 363846
223
+ - name: validation
224
+ num_bytes: 5653754
225
+ num_examples: 40430
226
+ - name: test
227
+ num_bytes: 55171111
228
+ num_examples: 390965
229
+ download_size: 73472088
230
+ dataset_size: 111725685
231
+ - config_name: rte
232
+ features:
233
+ - name: sentence1
234
+ dtype: string
235
+ - name: sentence2
236
+ dtype: string
237
+ - name: label
238
+ dtype:
239
+ class_label:
240
+ names:
241
+ '0': entailment
242
+ '1': not_entailment
243
+ - name: idx
244
+ dtype: int32
245
+ splits:
246
+ - name: train
247
+ num_bytes: 847320
248
+ num_examples: 2490
249
+ - name: validation
250
+ num_bytes: 90728
251
+ num_examples: 277
252
+ - name: test
253
+ num_bytes: 974053
254
+ num_examples: 3000
255
+ download_size: 1267150
256
+ dataset_size: 1912101
257
+ - config_name: sst2
258
+ features:
259
+ - name: sentence
260
+ dtype: string
261
+ - name: label
262
+ dtype:
263
+ class_label:
264
+ names:
265
+ '0': negative
266
+ '1': positive
267
+ - name: idx
268
+ dtype: int32
269
+ splits:
270
+ - name: train
271
+ num_bytes: 4681603
272
+ num_examples: 67349
273
+ - name: validation
274
+ num_bytes: 106252
275
+ num_examples: 872
276
+ - name: test
277
+ num_bytes: 216640
278
+ num_examples: 1821
279
+ download_size: 3305163
280
+ dataset_size: 5004495
281
+ - config_name: stsb
282
+ features:
283
+ - name: sentence1
284
+ dtype: string
285
+ - name: sentence2
286
+ dtype: string
287
+ - name: label
288
+ dtype: float32
289
+ - name: idx
290
+ dtype: int32
291
+ splits:
292
+ - name: train
293
+ num_bytes: 754791
294
+ num_examples: 5749
295
+ - name: validation
296
+ num_bytes: 216064
297
+ num_examples: 1500
298
+ - name: test
299
+ num_bytes: 169974
300
+ num_examples: 1379
301
+ download_size: 761235
302
+ dataset_size: 1140829
303
+ - config_name: wnli
304
+ features:
305
+ - name: sentence1
306
+ dtype: string
307
+ - name: sentence2
308
+ dtype: string
309
+ - name: label
310
+ dtype:
311
+ class_label:
312
+ names:
313
+ '0': not_entailment
314
+ '1': entailment
315
+ - name: idx
316
+ dtype: int32
317
+ splits:
318
+ - name: train
319
+ num_bytes: 107109
320
+ num_examples: 635
321
+ - name: validation
322
+ num_bytes: 12162
323
+ num_examples: 71
324
+ - name: test
325
+ num_bytes: 37889
326
+ num_examples: 146
327
+ download_size: 61250
328
+ dataset_size: 157160
329
+ configs:
330
+ - config_name: ax
331
+ data_files:
332
+ - split: test
333
+ path: ax/test-*
334
+ - config_name: cola
335
+ data_files:
336
+ - split: train
337
+ path: cola/train-*
338
+ - split: validation
339
+ path: cola/validation-*
340
+ - split: test
341
+ path: cola/test-*
342
+ - config_name: mnli
343
+ data_files:
344
+ - split: train
345
+ path: mnli/train-*
346
+ - split: validation_matched
347
+ path: mnli/validation_matched-*
348
+ - split: validation_mismatched
349
+ path: mnli/validation_mismatched-*
350
+ - split: test_matched
351
+ path: mnli/test_matched-*
352
+ - split: test_mismatched
353
+ path: mnli/test_mismatched-*
354
+ - config_name: mnli_matched
355
+ data_files:
356
+ - split: validation
357
+ path: mnli_matched/validation-*
358
+ - split: test
359
+ path: mnli_matched/test-*
360
+ - config_name: mnli_mismatched
361
+ data_files:
362
+ - split: validation
363
+ path: mnli_mismatched/validation-*
364
+ - split: test
365
+ path: mnli_mismatched/test-*
366
+ - config_name: mrpc
367
+ data_files:
368
+ - split: train
369
+ path: mrpc/train-*
370
+ - split: validation
371
+ path: mrpc/validation-*
372
+ - split: test
373
+ path: mrpc/test-*
374
+ - config_name: qnli
375
+ data_files:
376
+ - split: train
377
+ path: qnli/train-*
378
+ - split: validation
379
+ path: qnli/validation-*
380
+ - split: test
381
+ path: qnli/test-*
382
+ - config_name: qqp
383
+ data_files:
384
+ - split: train
385
+ path: qqp/train-*
386
+ - split: validation
387
+ path: qqp/validation-*
388
+ - split: test
389
+ path: qqp/test-*
390
+ - config_name: rte
391
+ data_files:
392
+ - split: train
393
+ path: rte/train-*
394
+ - split: validation
395
+ path: rte/validation-*
396
+ - split: test
397
+ path: rte/test-*
398
+ - config_name: sst2
399
+ data_files:
400
+ - split: train
401
+ path: sst2/train-*
402
+ - split: validation
403
+ path: sst2/validation-*
404
+ - split: test
405
+ path: sst2/test-*
406
+ - config_name: stsb
407
+ data_files:
408
+ - split: train
409
+ path: stsb/train-*
410
+ - split: validation
411
+ path: stsb/validation-*
412
+ - split: test
413
+ path: stsb/test-*
414
+ - config_name: wnli
415
+ data_files:
416
+ - split: train
417
+ path: wnli/train-*
418
+ - split: validation
419
+ path: wnli/validation-*
420
+ - split: test
421
+ path: wnli/test-*
422
  train-eval-index:
423
  - config: cola
424
  task: text-classification
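The `configs` block above maps each config name to its Parquet files by split (e.g. `cola/train-*`), which is what the dataset viewer and client libraries resolve. As a sanity check, a single shard can also be read directly; a sketch assuming `huggingface_hub` is installed (it provides the `hf://` filesystem) and using a placeholder repo id:

```python
import pandas as pd

# Placeholder repo id; the path mirrors the data_files layout declared above.
df = pd.read_parquet(
    "hf://datasets/<namespace>/<dataset-repo>/cola/train-00000-of-00001.parquet"
)

print(list(df.columns))  # expected: ['sentence', 'label', 'idx']
print(len(df))           # expected: 8551 rows, per the cola train split above
```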
dummy/mrpc/1.0.0/dummy_data.zip → ax/test-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0bfe41b0047215524032750c1faf32c84c41566279fca9df1c35482640537aa6
- size 4539
+ oid sha256:3a47cbc39a3617a4121f9c30d5f0c77a0f37e96427dbaef01647cb0c4ff92638
+ size 79191
dummy/ax/1.0.0/dummy_data.zip → cola/test-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d2a34cfe9a95b80530887f488eb04e3514b322e0fa65c64f425ddb7aea449f69
- size 509
+ oid sha256:a062af38745f295a5bd520db8ec9891c0b8bdf27ff41bfc758ddc5b6f8d13f2e
+ size 37133
cola/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c93ec046925d08d9153e3a888e18c6d4355d3aca8c0c41b7e4a2b118e90d661b
+ size 248320
dummy/cola/1.0.0/dummy_data.zip → cola/validation-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d678797c6eb84d3436868f8b5ac506f88f12bd51633245bd1a20af6021ac48d4
- size 1116
+ oid sha256:34ca31bcad53a8a85805c872863ab167a245ca569f6105c603498e94ed8b1ad0
+ size 36941
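Note that the diffs above only touch Git LFS pointer files (a `version` line, the `oid sha256:...`, and the `size` in bytes); the actual Parquet payloads live in LFS storage. To fetch a resolved file rather than its pointer, `huggingface_hub` can be used; a sketch with a placeholder repo id:

```python
from huggingface_hub import hf_hub_download

# Placeholder repo id; downloads the resolved Parquet file, not the LFS pointer.
local_path = hf_hub_download(
    repo_id="<namespace>/<dataset-repo>",
    repo_type="dataset",
    filename="cola/validation-00000-of-00001.parquet",
)
print(local_path)
```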
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"cola": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{warstadt2018neural,\n title={Neural Network Acceptability Judgments},\n author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1805.12471},\n year={2018}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://nyu-mll.github.io/CoLA/", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["unacceptable", "acceptable"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "cola", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 61049, "num_examples": 1063, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 489149, "num_examples": 8551, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 60850, "num_examples": 1043, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/CoLA.zip": {"num_bytes": 376971, "checksum": "f212fcd832b8f7b435fb991f101abf89f96b933ab400603bf198960dfc32cbff"}}, "download_size": 376971, "post_processing_size": null, "dataset_size": 611048, "size_in_bytes": 988019}, "sst2": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{socher2013recursive,\n title={Recursive deep models for semantic compositionality over a sentiment treebank},\n author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},\n booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},\n pages={1631--1642},\n year={2013}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://nlp.stanford.edu/sentiment/index.html", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["negative", "positive"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "sst2", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 217556, "num_examples": 1821, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 4715283, "num_examples": 67349, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 106692, "num_examples": 872, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/SST-2.zip": {"num_bytes": 7439277, "checksum": "d67e16fb55739c1b32cdce9877596db1c127dc322d93c082281f64057c16deaa"}}, "download_size": 7439277, "post_processing_size": null, "dataset_size": 5039531, "size_in_bytes": 12478808}, "mrpc": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{dolan2005automatically,\n title={Automatically constructing a corpus of sentential paraphrases},\n author={Dolan, William B and Brockett, Chris},\n booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},\n year={2005}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://www.microsoft.com/en-us/download/details.aspx?id=52398", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_equivalent", "equivalent"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mrpc", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 443498, "num_examples": 1725, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 946146, "num_examples": 3668, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 106142, "num_examples": 408, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/mrpc_dev_ids.tsv": {"num_bytes": 6222, "checksum": "971d7767d81b997fd9060ade0ec23c4fc31cbb226a55d1bd4a1bac474eb81dc7"}, "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt": {"num_bytes": 1047044, "checksum": "60a9b09084528f0673eedee2b69cb941920f0b8cd0eeccefc464a98768457f89"}, "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt": {"num_bytes": 441275, "checksum": "a04e271090879aaba6423d65b94950c089298587d9c084bf9cd7439bd785f784"}}, "download_size": 1494541, "post_processing_size": null, "dataset_size": 1495786, "size_in_bytes": 2990327}, "qqp": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@online{WinNT,\n author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},\n title = {First Quora Dataset Release: Question Pairs},\n year = {2017},\n url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},\n urldate = {2019-04-03}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n", "homepage": "https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs", "license": "", "features": {"question1": {"dtype": "string", "id": null, "_type": "Value"}, "question2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_duplicate", "duplicate"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "qqp", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 50901116, "num_examples": 363846, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 5653794, "num_examples": 40430, "dataset_name": "glue"}, "test": {"name": "test", "num_bytes": 55171431, "num_examples": 390965, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip": {"num_bytes": 41696084, "checksum": 
"40e7c862c04eb26ee04b67fd900e76c45c6ba8e6d8fab4f8f1f8072a1a3fbae0"}}, "download_size": 41696084, "post_processing_size": null, "dataset_size": 111726341, "size_in_bytes": 153422425}, "stsb": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{cer2017semeval,\n title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},\n author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},\n journal={arXiv preprint arXiv:1708.00055},\n year={2017}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"dtype": "float32", "id": null, "_type": "Value"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "stsb", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 170847, "num_examples": 1379, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 758394, "num_examples": 5749, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 217012, "num_examples": 1500, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/STS-B.zip": {"num_bytes": 802872, "checksum": "e60a6393de5a8b5b9bac5020a1554b54e3691f9d600b775bd131e613ac179c85"}}, "download_size": 802872, "post_processing_size": null, "dataset_size": 1146253, "size_in_bytes": 1949125}, "mnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and 
Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test_matched": {"name": "test_matched", "num_bytes": 1854787, "num_examples": 9796, "dataset_name": "glue"}, "test_mismatched": {"name": "test_mismatched", "num_bytes": 1956866, "num_examples": 9847, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 74865118, "num_examples": 392702, "dataset_name": "glue"}, "validation_matched": {"name": "validation_matched", "num_bytes": 1839926, "num_examples": 9815, "dataset_name": "glue"}, "validation_mismatched": {"name": "validation_mismatched", "num_bytes": 1955384, "num_examples": 9832, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "post_processing_size": null, "dataset_size": 82472081, "size_in_bytes": 395255588}, "mnli_mismatched": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli_mismatched", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1956866, "num_examples": 9847, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1955384, "num_examples": 9832, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "post_processing_size": null, "dataset_size": 3912250, "size_in_bytes": 316695757}, "mnli_matched": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "http://www.nyu.edu/projects/bowman/multinli/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "mnli_matched", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1854787, "num_examples": 9796, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1839926, "num_examples": 9815, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/MNLI.zip": {"num_bytes": 312783507, "checksum": "e7c1d896d26ed6caf700110645df426cc2d8ebf02a5ab743d5a5c68ac1c83633"}}, "download_size": 312783507, "post_processing_size": null, "dataset_size": 3694713, "size_in_bytes": 316478220}, "qnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@article{rajpurkar2016squad,\n title={Squad: 100,000+ questions for machine comprehension of text},\n author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},\n journal={arXiv preprint arXiv:1606.05250},\n year={2016}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://rajpurkar.github.io/SQuAD-explorer/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "qnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 1376516, "num_examples": 5463, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 25677924, "num_examples": 104743, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 1371727, "num_examples": 5463, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip": {"num_bytes": 10627589, "checksum": "e634e78627a29adaecd4f955359b22bf5e70f2cbd93b493f2d624138a0c0e5f5"}}, "download_size": 10627589, "post_processing_size": null, "dataset_size": 28426167, "size_in_bytes": 39053756}, "rte": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{dagan2005pascal,\n title={The PASCAL recognising textual entailment challenge},\n author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},\n booktitle={Machine Learning Challenges Workshop},\n pages={177--190},\n year={2005},\n organization={Springer}\n}\n@inproceedings{bar2006second,\n title={The second pascal recognising textual entailment challenge},\n author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},\n booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},\n volume={6},\n number={1},\n pages={6--4},\n year={2006},\n organization={Venice}\n}\n@inproceedings{giampiccolo2007third,\n title={The third pascal recognizing textual entailment challenge},\n author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},\n booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},\n pages={1--9},\n year={2007},\n organization={Association for Computational Linguistics}\n}\n@inproceedings{bentivogli2009fifth,\n title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},\n author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},\n booktitle={TAC},\n year={2009}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://aclweb.org/aclwiki/Recognizing_Textual_Entailment", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "rte", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 975936, "num_examples": 3000, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 848888, "num_examples": 2490, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 90911, "num_examples": 277, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/RTE.zip": {"num_bytes": 697150, "checksum": "6bf86de103ecd335f3441bd43574d23fef87ecc695977a63b82d5efb206556ee"}}, "download_size": 697150, "post_processing_size": null, "dataset_size": 1915735, "size_in_bytes": 2612885}, "wnli": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "@inproceedings{levesque2012winograd,\n title={The winograd schema challenge},\n author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},\n booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},\n year={2012}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. 
Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_entailment", "entailment"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "wnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 37992, "num_examples": 146, "dataset_name": "glue"}, "train": {"name": "train", "num_bytes": 107517, "num_examples": 635, "dataset_name": "glue"}, "validation": {"name": "validation", "num_bytes": 12215, "num_examples": 71, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/WNLI.zip": {"num_bytes": 28999, "checksum": "ae0e8e4d16f4d46d4a0a566ec7ecceccfd3fbfaa4a7a4b4e02848c0f2561ac46"}}, "download_size": 28999, "post_processing_size": null, "dataset_size": 157724, "size_in_bytes": 186723}, "ax": {"description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n", "citation": "\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.", "homepage": "https://gluebenchmark.com/diagnostics", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "names_file": null, "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "glue", "config_name": "ax", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 238392, "num_examples": 1104, "dataset_name": "glue"}}, "download_checksums": {"https://dl.fbaipublicfiles.com/glue/data/AX.tsv": {"num_bytes": 222257, "checksum": "0e13510b1bb14436ff7e2ee82338f0efb0133ecf2e73507a697dc210db3f05fd"}}, "download_size": 222257, "post_processing_size": null, "dataset_size": 238392, "size_in_bytes": 460649}}
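The per-config metadata that lived in `dataset_infos.json` (features, split sizes, download size) is carried by the `dataset_info` YAML block added to README.md above, so it stays accessible programmatically after this deletion. A sketch, assuming the YAML is picked up as usual and with a placeholder repo id:

```python
from datasets import load_dataset_builder

# Placeholder repo id; the builder reads the dataset_info YAML from README.md.
builder = load_dataset_builder("<namespace>/<dataset-repo>", "cola")

print(builder.info.features)                      # sentence, label (ClassLabel), idx
print(builder.info.splits["train"].num_examples)  # 8551, matching the YAML above
```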
dummy/qnli/1.0.0/dummy_data.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a771b312be26048e7d921ff4bf01ac7de224641cd51977629bb54b9839637fb0
- size 1859
dummy/qqp/1.0.0/dummy_data.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:3a1f6bf7c3ae0587a99d4ecfc2c4ab900efbd23dc1c68e2556426da9feab0163
- size 1588
dummy/rte/1.0.0/dummy_data.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7bec2e7562503a3b7ef577986b4cd10b075818b66fb03df8d4dec79d28a5bf5f
- size 1613
dummy/sst2/1.0.0/dummy_data.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:5ff05ebd2679fd60f174cd19415e8dd0c2f701f49f8f9dbb63f7b30707d9b06e
- size 1143
dummy/stsb/1.0.0/dummy_data.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8027e1188e092ea53eede8a2b2bd245f4c98f2b37132ea5d7dd173bac36e025e
- size 1353
dummy/wnli/1.0.0/dummy_data.zip DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e52960c15224df1f7202371029b3a5fad3b4dfec72132d3c8b996ff03db92755
- size 1407
glue-ci.py DELETED
@@ -1,628 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # Lint as: python3
17
- """The General Language Understanding Evaluation (GLUE) benchmark."""
18
-
19
-
20
- import csv
21
- import os
22
- import textwrap
23
-
24
- import numpy as np
25
-
26
- import datasets
27
-
28
-
29
- _GLUE_CITATION = """\
30
- @inproceedings{wang2019glue,
31
- title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
32
- author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
33
- note={In the Proceedings of ICLR.},
34
- year={2019}
35
- }
36
- """
37
-
38
- _GLUE_DESCRIPTION = """\
39
- GLUE, the General Language Understanding Evaluation benchmark
40
- (https://gluebenchmark.com/) is a collection of resources for training,
41
- evaluating, and analyzing natural language understanding systems.
42
-
43
- """
44
-
45
- _MRPC_DEV_IDS = "https://dl.fbaipublicfiles.com/glue/data/mrpc_dev_ids.tsv"
46
- _MRPC_TRAIN = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt"
47
- _MRPC_TEST = "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt"
48
-
49
- _MNLI_BASE_KWARGS = dict(
50
- text_features={
51
- "premise": "sentence1",
52
- "hypothesis": "sentence2",
53
- },
54
- label_classes=["entailment", "neutral", "contradiction"],
55
- label_column="gold_label",
56
- data_url="https://dl.fbaipublicfiles.com/glue/data/MNLI.zip",
57
- data_dir="MNLI",
58
- citation=textwrap.dedent(
59
- """\
60
- @InProceedings{N18-1101,
61
- author = "Williams, Adina
62
- and Nangia, Nikita
63
- and Bowman, Samuel",
64
- title = "A Broad-Coverage Challenge Corpus for
65
- Sentence Understanding through Inference",
66
- booktitle = "Proceedings of the 2018 Conference of
67
- the North American Chapter of the
68
- Association for Computational Linguistics:
69
- Human Language Technologies, Volume 1 (Long
70
- Papers)",
71
- year = "2018",
72
- publisher = "Association for Computational Linguistics",
73
- pages = "1112--1122",
74
- location = "New Orleans, Louisiana",
75
- url = "http://aclweb.org/anthology/N18-1101"
76
- }
77
- @article{bowman2015large,
78
- title={A large annotated corpus for learning natural language inference},
79
- author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
80
- journal={arXiv preprint arXiv:1508.05326},
81
- year={2015}
82
- }"""
83
- ),
84
- url="http://www.nyu.edu/projects/bowman/multinli/",
85
- )
86
-
87
-
88
- class GlueConfig(datasets.BuilderConfig):
89
- """BuilderConfig for GLUE."""
90
-
91
- def __init__(
92
- self,
93
- text_features,
94
- label_column,
95
- data_url,
96
- data_dir,
97
- citation,
98
- url,
99
- label_classes=None,
100
- process_label=lambda x: x,
101
- **kwargs,
102
- ):
103
- """BuilderConfig for GLUE.
104
-
105
- Args:
106
- text_features: `dict[string, string]`, map from the name of the feature
107
- dict for each text field to the name of the column in the tsv file
108
- label_column: `string`, name of the column in the tsv file corresponding
109
- to the label
110
- data_url: `string`, url to download the zip file from
111
- data_dir: `string`, the path to the folder containing the tsv files in the
112
- downloaded zip
113
- citation: `string`, citation for the data set
114
- url: `string`, url for information about the data set
115
- label_classes: `list[string]`, the list of classes if the label is
116
- categorical. If not provided, then the label will be of type
117
- `datasets.Value('float32')`.
118
- process_label: `Function[string, any]`, function taking in the raw value
119
- of the label and processing it to the form required by the label feature
120
- **kwargs: keyword arguments forwarded to super.
121
- """
122
- super(GlueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
123
- self.text_features = text_features
124
- self.label_column = label_column
125
- self.label_classes = label_classes
126
- self.data_url = data_url
127
- self.data_dir = data_dir
128
- self.citation = citation
129
- self.url = url
130
- self.process_label = process_label
131
-
132
-
133
- class Glue(datasets.GeneratorBasedBuilder):
134
- """The General Language Understanding Evaluation (GLUE) benchmark."""
135
-
136
- BUILDER_CONFIGS = [
137
- GlueConfig(
138
- name="cola",
139
- description=textwrap.dedent(
140
- """\
141
- The Corpus of Linguistic Acceptability consists of English
142
- acceptability judgments drawn from books and journal articles on
143
- linguistic theory. Each example is a sequence of words annotated
144
- with whether it is a grammatical English sentence."""
145
- ),
146
- text_features={"sentence": "sentence"},
147
- label_classes=["unacceptable", "acceptable"],
148
- label_column="is_acceptable",
149
- data_url="https://dl.fbaipublicfiles.com/glue/data/CoLA.zip",
150
- data_dir="CoLA",
151
- citation=textwrap.dedent(
152
- """\
153
- @article{warstadt2018neural,
154
- title={Neural Network Acceptability Judgments},
155
- author={Warstadt, Alex and Singh, Amanpreet and Bowman, Samuel R},
156
- journal={arXiv preprint arXiv:1805.12471},
157
- year={2018}
158
- }"""
159
- ),
160
- url="https://nyu-mll.github.io/CoLA/",
161
- ),
162
- GlueConfig(
163
- name="sst2",
164
- description=textwrap.dedent(
165
- """\
166
- The Stanford Sentiment Treebank consists of sentences from movie reviews and
167
- human annotations of their sentiment. The task is to predict the sentiment of a
168
- given sentence. We use the two-way (positive/negative) class split, and use only
169
- sentence-level labels."""
170
- ),
171
- text_features={"sentence": "sentence"},
172
- label_classes=["negative", "positive"],
173
- label_column="label",
174
- data_url="https://dl.fbaipublicfiles.com/glue/data/SST-2.zip",
175
- data_dir="SST-2",
176
- citation=textwrap.dedent(
177
- """\
178
- @inproceedings{socher2013recursive,
179
- title={Recursive deep models for semantic compositionality over a sentiment treebank},
180
- author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
181
- booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
182
- pages={1631--1642},
183
- year={2013}
184
- }"""
185
- ),
186
- url="https://datasets.stanford.edu/sentiment/index.html",
187
- ),
188
- GlueConfig(
189
- name="mrpc",
190
- description=textwrap.dedent(
191
- """\
192
- The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of
193
- sentence pairs automatically extracted from online news sources, with human annotations
194
- for whether the sentences in the pair are semantically equivalent."""
195
- ), # pylint: disable=line-too-long
196
- text_features={"sentence1": "", "sentence2": ""},
197
- label_classes=["not_equivalent", "equivalent"],
198
- label_column="Quality",
199
- data_url="", # MRPC isn't hosted by GLUE.
200
- data_dir="MRPC",
201
- citation=textwrap.dedent(
202
- """\
203
- @inproceedings{dolan2005automatically,
204
- title={Automatically constructing a corpus of sentential paraphrases},
205
- author={Dolan, William B and Brockett, Chris},
206
- booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},
207
- year={2005}
208
- }"""
209
- ),
210
- url="https://www.microsoft.com/en-us/download/details.aspx?id=52398",
211
- ),
212
- GlueConfig(
213
- name="qqp",
214
- description=textwrap.dedent(
215
- """\
216
- The Quora Question Pairs2 dataset is a collection of question pairs from the
217
- community question-answering website Quora. The task is to determine whether a
218
- pair of questions are semantically equivalent."""
219
- ),
220
- text_features={
221
- "question1": "question1",
222
- "question2": "question2",
223
- },
224
- label_classes=["not_duplicate", "duplicate"],
225
- label_column="is_duplicate",
226
- data_url="https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip",
227
- data_dir="QQP",
228
- citation=textwrap.dedent(
229
- """\
230
- @online{WinNT,
231
- author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
232
- title = {First Quora Dataset Release: Question Pairs},
233
- year = {2017},
234
- url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
235
- urldate = {2019-04-03}
236
- }"""
237
- ),
238
- url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
239
- ),
240
- GlueConfig(
241
- name="stsb",
242
- description=textwrap.dedent(
243
- """\
244
- The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
245
- sentence pairs drawn from news headlines, video and image captions, and natural
246
- language inference data. Each pair is human-annotated with a similarity score
247
- from 1 to 5."""
248
- ),
249
- text_features={
250
- "sentence1": "sentence1",
251
- "sentence2": "sentence2",
252
- },
253
- label_column="score",
254
- data_url="https://dl.fbaipublicfiles.com/glue/data/STS-B.zip",
255
- data_dir="STS-B",
256
- citation=textwrap.dedent(
257
- """\
258
- @article{cer2017semeval,
259
- title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},
260
- author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},
261
- journal={arXiv preprint arXiv:1708.00055},
262
- year={2017}
263
- }"""
264
- ),
265
- url="http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark",
266
- process_label=np.float32,
267
- ),
268
- GlueConfig(
269
- name="mnli",
270
- description=textwrap.dedent(
271
- """\
272
- The Multi-Genre Natural Language Inference Corpus is a crowdsourced
273
- collection of sentence pairs with textual entailment annotations. Given a premise sentence
274
- and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
275
- (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
276
- gathered from ten different sources, including transcribed speech, fiction, and government reports.
277
- We use the standard test set, for which we obtained private labels from the authors, and evaluate
278
- on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
279
- the SNLI corpus as 550k examples of auxiliary training data."""
280
- ),
281
- **_MNLI_BASE_KWARGS,
282
- ),
283
- GlueConfig(
284
- name="mnli_mismatched",
285
- description=textwrap.dedent(
286
- """\
287
- The mismatched validation and test splits from MNLI.
288
- See the "mnli" BuilderConfig for additional information."""
289
- ),
290
- **_MNLI_BASE_KWARGS,
291
- ),
292
- GlueConfig(
293
- name="mnli_matched",
294
- description=textwrap.dedent(
295
- """\
296
- The matched validation and test splits from MNLI.
297
- See the "mnli" BuilderConfig for additional information."""
298
- ),
299
- **_MNLI_BASE_KWARGS,
300
- ),
301
- GlueConfig(
302
- name="qnli",
303
- description=textwrap.dedent(
304
- """\
305
- The Stanford Question Answering Dataset is a question-answering
306
- dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
307
- from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
308
- convert the task into sentence pair classification by forming a pair between each question and each
309
- sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
310
- question and the context sentence. The task is to determine whether the context sentence contains
311
- the answer to the question. This modified version of the original task removes the requirement that
312
- the model select the exact answer, but also removes the simplifying assumptions that the answer
313
- is always present in the input and that lexical overlap is a reliable cue."""
314
- ), # pylint: disable=line-too-long
315
- text_features={
316
- "question": "question",
317
- "sentence": "sentence",
318
- },
319
- label_classes=["entailment", "not_entailment"],
320
- label_column="label",
321
- data_url="https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip",
322
- data_dir="QNLI",
323
- citation=textwrap.dedent(
324
- """\
325
- @article{rajpurkar2016squad,
326
- title={Squad: 100,000+ questions for machine comprehension of text},
327
- author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
328
- journal={arXiv preprint arXiv:1606.05250},
329
- year={2016}
330
- }"""
331
- ),
332
- url="https://rajpurkar.github.io/SQuAD-explorer/",
333
- ),
334
- GlueConfig(
335
- name="rte",
336
- description=textwrap.dedent(
337
- """\
338
- The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
339
- entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
340
- et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009).4 Examples are
341
- constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
342
- for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""
343
- ), # pylint: disable=line-too-long
344
- text_features={
345
- "sentence1": "sentence1",
346
- "sentence2": "sentence2",
347
- },
348
- label_classes=["entailment", "not_entailment"],
349
- label_column="label",
350
- data_url="https://dl.fbaipublicfiles.com/glue/data/RTE.zip",
351
- data_dir="RTE",
352
- citation=textwrap.dedent(
353
- """\
354
- @inproceedings{dagan2005pascal,
355
- title={The PASCAL recognising textual entailment challenge},
356
- author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
357
- booktitle={Machine Learning Challenges Workshop},
358
- pages={177--190},
359
- year={2005},
360
- organization={Springer}
361
- }
362
- @inproceedings{bar2006second,
363
- title={The second pascal recognising textual entailment challenge},
364
- author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
365
- booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
366
- volume={6},
367
- number={1},
368
- pages={6--4},
369
- year={2006},
370
- organization={Venice}
371
- }
372
- @inproceedings{giampiccolo2007third,
373
- title={The third pascal recognizing textual entailment challenge},
374
- author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
375
- booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
376
- pages={1--9},
377
- year={2007},
378
- organization={Association for Computational Linguistics}
379
- }
380
- @inproceedings{bentivogli2009fifth,
381
- title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
382
- author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
383
- booktitle={TAC},
384
- year={2009}
385
- }"""
386
- ),
387
- url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
388
- ),
389
- GlueConfig(
390
- name="wnli",
391
- description=textwrap.dedent(
392
- """\
393
- The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
394
- in which a system must read a sentence with a pronoun and select the referent of that pronoun from
395
- a list of choices. The examples are manually constructed to foil simple statistical methods: Each
396
- one is contingent on contextual information provided by a single word or phrase in the sentence.
397
- To convert the problem into sentence pair classification, we construct sentence pairs by replacing
398
- the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
399
- pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
400
- new examples derived from fiction books that was shared privately by the authors of the original
401
- corpus. While the included training set is balanced between two classes, the test set is imbalanced
402
- between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
403
- hypotheses are sometimes shared between training and development examples, so if a model memorizes the
404
- training examples, they will predict the wrong label on corresponding development set
405
- example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
406
- between a model's score on this task and its score on the unconverted original task. We
407
- call converted dataset WNLI (Winograd NLI)."""
408
- ),
409
- text_features={
410
- "sentence1": "sentence1",
411
- "sentence2": "sentence2",
412
- },
413
- label_classes=["not_entailment", "entailment"],
414
- label_column="label",
415
- data_url="https://dl.fbaipublicfiles.com/glue/data/WNLI.zip",
416
- data_dir="WNLI",
417
- citation=textwrap.dedent(
418
- """\
419
- @inproceedings{levesque2012winograd,
420
- title={The winograd schema challenge},
421
- author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
422
- booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
423
- year={2012}
424
- }"""
425
- ),
426
- url="https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html",
427
- ),
428
- GlueConfig(
429
- name="ax",
430
- description=textwrap.dedent(
431
- """\
432
- A manually-curated evaluation dataset for fine-grained analysis of
433
- system performance on a broad range of linguistic phenomena. This
434
- dataset evaluates sentence understanding through Natural Language
435
- Inference (NLI) problems. Use a model trained on MultiNLI to produce
436
- predictions for this dataset."""
437
- ),
438
- text_features={
439
- "premise": "sentence1",
440
- "hypothesis": "sentence2",
441
- },
442
- label_classes=["entailment", "neutral", "contradiction"],
443
- label_column="", # No label since we only have test set.
444
- # We must use a URL shortener since the URL from GLUE is very long and
445
- # causes issues in TFDS.
446
- data_url="https://dl.fbaipublicfiles.com/glue/data/AX.tsv",
447
- data_dir="", # We are downloading a tsv.
448
- citation="", # The GLUE citation is sufficient.
449
- url="https://gluebenchmark.com/diagnostics",
450
- ),
451
- ]
452
-
453
- def _info(self):
454
- features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
455
- if self.config.label_classes:
456
- features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
457
- else:
458
- features["label"] = datasets.Value("float32")
459
- features["idx"] = datasets.Value("int32")
460
- return datasets.DatasetInfo(
461
- description=_GLUE_DESCRIPTION,
462
- features=datasets.Features(features),
463
- homepage=self.config.url,
464
- citation=self.config.citation + "\n" + _GLUE_CITATION,
465
- )
466
-
467
- def _split_generators(self, dl_manager):
468
- if self.config.name == "ax":
469
- data_file = dl_manager.download(self.config.data_url)
470
- return [
471
- datasets.SplitGenerator(
472
- name=datasets.Split.TEST,
473
- gen_kwargs={
474
- "data_file": data_file,
475
- "split": "test",
476
- },
477
- )
478
- ]
479
-
480
- if self.config.name == "mrpc":
481
- data_dir = None
482
- mrpc_files = dl_manager.download(
483
- {
484
- "dev_ids": _MRPC_DEV_IDS,
485
- "train": _MRPC_TRAIN,
486
- "test": _MRPC_TEST,
487
- }
488
- )
489
- else:
490
- dl_dir = dl_manager.download_and_extract(self.config.data_url)
491
- data_dir = os.path.join(dl_dir, self.config.data_dir)
492
- mrpc_files = None
493
- train_split = datasets.SplitGenerator(
494
- name=datasets.Split.TRAIN,
495
- gen_kwargs={
496
- "data_file": os.path.join(data_dir or "", "train.tsv"),
497
- "split": "train",
498
- "mrpc_files": mrpc_files,
499
- },
500
- )
501
- if self.config.name == "mnli":
502
- return [
503
- train_split,
504
- _mnli_split_generator("validation_matched", data_dir, "dev", matched=True),
505
- _mnli_split_generator("validation_mismatched", data_dir, "dev", matched=False),
506
- _mnli_split_generator("test_matched", data_dir, "test", matched=True),
507
- _mnli_split_generator("test_mismatched", data_dir, "test", matched=False),
508
- ]
509
- elif self.config.name == "mnli_matched":
510
- return [
511
- _mnli_split_generator("validation", data_dir, "dev", matched=True),
512
- _mnli_split_generator("test", data_dir, "test", matched=True),
513
- ]
514
- elif self.config.name == "mnli_mismatched":
515
- return [
516
- _mnli_split_generator("validation", data_dir, "dev", matched=False),
517
- _mnli_split_generator("test", data_dir, "test", matched=False),
518
- ]
519
- else:
520
- return [
521
- train_split,
522
- datasets.SplitGenerator(
523
- name=datasets.Split.VALIDATION,
524
- gen_kwargs={
525
- "data_file": os.path.join(data_dir or "", "dev.tsv"),
526
- "split": "dev",
527
- "mrpc_files": mrpc_files,
528
- },
529
- ),
530
- datasets.SplitGenerator(
531
- name=datasets.Split.TEST,
532
- gen_kwargs={
533
- "data_file": os.path.join(data_dir or "", "test.tsv"),
534
- "split": "test",
535
- "mrpc_files": mrpc_files,
536
- },
537
- ),
538
- ]
539
-
540
- def _generate_examples(self, data_file, split, mrpc_files=None):
541
- if self.config.name == "mrpc":
542
- # We have to prepare the MRPC dataset from the original sources ourselves.
543
- examples = self._generate_example_mrpc_files(mrpc_files=mrpc_files, split=split)
544
- for example in examples:
545
- yield example["idx"], example
546
- else:
547
- process_label = self.config.process_label
548
- label_classes = self.config.label_classes
549
-
550
- # The train and dev files for CoLA are the only tsv files without a
551
- # header.
552
- is_cola_non_test = self.config.name == "cola" and split != "test"
553
-
554
- with open(data_file, encoding="utf8") as f:
555
- reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
556
- if is_cola_non_test:
557
- reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
558
-
559
- for n, row in enumerate(reader):
560
- if is_cola_non_test:
561
- row = {
562
- "sentence": row[3],
563
- "is_acceptable": row[1],
564
- }
565
-
566
- example = {feat: row[col] for feat, col in self.config.text_features.items()}
567
- example["idx"] = n
568
-
569
- if self.config.label_column in row:
570
- label = row[self.config.label_column]
571
- # For some tasks, the label is represented as 0 and 1 in the tsv
572
- # files and needs to be cast to integer to work with the feature.
573
- if label_classes and label not in label_classes:
574
- label = int(label) if label else None
575
- example["label"] = process_label(label)
576
- else:
577
- example["label"] = process_label(-1)
578
-
579
- # Filter out corrupted rows.
580
- for value in example.values():
581
- if value is None:
582
- break
583
- else:
584
- yield example["idx"], example
585
-
586
- def _generate_example_mrpc_files(self, mrpc_files, split):
587
- if split == "test":
588
- with open(mrpc_files["test"], encoding="utf8") as f:
589
- # The first 3 bytes are the utf-8 BOM \xef\xbb\xbf, which messes with
590
- # the Quality key.
591
- f.seek(3)
592
- reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
593
- for n, row in enumerate(reader):
594
- yield {
595
- "sentence1": row["#1 String"],
596
- "sentence2": row["#2 String"],
597
- "label": int(row["Quality"]),
598
- "idx": n,
599
- }
600
- else:
601
- with open(mrpc_files["dev_ids"], encoding="utf8") as f:
602
- reader = csv.reader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
603
- dev_ids = [[row[0], row[1]] for row in reader]
604
- with open(mrpc_files["train"], encoding="utf8") as f:
605
- # The first 3 bytes are the utf-8 BOM \xef\xbb\xbf, which messes with
606
- # the Quality key.
607
- f.seek(3)
608
- reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
609
- for n, row in enumerate(reader):
610
- is_row_in_dev = [row["#1 ID"], row["#2 ID"]] in dev_ids
611
- if is_row_in_dev == (split == "dev"):
612
- yield {
613
- "sentence1": row["#1 String"],
614
- "sentence2": row["#2 String"],
615
- "label": int(row["Quality"]),
616
- "idx": n,
617
- }
618
-
619
-
620
- def _mnli_split_generator(name, data_dir, split, matched):
621
- return datasets.SplitGenerator(
622
- name=name,
623
- gen_kwargs={
624
- "data_file": os.path.join(data_dir, "%s_%s.tsv" % (split, "matched" if matched else "mismatched")),
625
- "split": split,
626
- "mrpc_files": None,
627
- },
628
- )
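With the loading script deleted above, the builder configs, features, and split logic it defined are carried by the Parquet shards and README metadata instead, and the `datasets` library reads them directly. A minimal sketch of loading the converted data — the repository id below is a placeholder and should be replaced with this dataset's actual id on the Hub:

```python
# Minimal sketch: loading the Parquet-converted configs with the `datasets` library.
from datasets import load_dataset

repo_id = "glue"  # placeholder -- substitute this repository's id on the Hub

# Each former GlueConfig (cola, sst2, mrpc, ..., ax) is now a Parquet config.
cola = load_dataset(repo_id, "cola")
print(cola["train"][0])  # {"sentence": ..., "label": 0 or 1, "idx": ...}

# stsb keeps a float similarity score as its label; the other configs use ClassLabel,
# mirroring the _info() logic of the removed script.
stsb = load_dataset(repo_id, "stsb", split="validation")
print(stsb.features["label"])
```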
mnli/test_matched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:260fbb26acee4e781e8bbe41d41356b129d8a4fd3f485ac764c928725c49c454
3
+ size 1212996
mnli/test_mismatched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfac0b6d17a6ac08d07a86a8ac8d95e92e5dbe28f4512801b685f543a6ec6e09
3
+ size 1251702
mnli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad52fb2458a4e192c97bcab4b3574a8b7a0a59dc92d6d757ff64f7fd9fc88fbf
3
+ size 51981599
mnli/validation_matched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a665d08615288ea2e6df41320f6e7dd70083c42d13bc0597c97c818b5c9c1aa5
3
+ size 1208425
mnli/validation_mismatched-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ae79dd1ea4a4eefa96309034a94071bdac58340fd98581fbb303afa3b157b87
3
+ size 1244865
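The five mnli shards above correspond to the splits the removed `_split_generators` produced for the mnli config (train plus matched and mismatched validation/test), while the mnli_matched and mnli_mismatched shards below expose plain validation/test splits only. A short sketch, using the same placeholder repository id as above:

```python
from datasets import load_dataset

repo_id = "glue"  # placeholder -- substitute this repository's id on the Hub

# mnli: train, validation_matched, validation_mismatched, test_matched, test_mismatched
mnli = load_dataset(repo_id, "mnli")
print(mnli)

# mnli_matched / mnli_mismatched: validation and test only
val_mismatched = load_dataset(repo_id, "mnli_mismatched", split="validation")
```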
mnli_matched/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:260fbb26acee4e781e8bbe41d41356b129d8a4fd3f485ac764c928725c49c454
3
+ size 1212996
mnli_matched/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a665d08615288ea2e6df41320f6e7dd70083c42d13bc0597c97c818b5c9c1aa5
3
+ size 1208425
mnli_mismatched/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfac0b6d17a6ac08d07a86a8ac8d95e92e5dbe28f4512801b685f543a6ec6e09
3
+ size 1251702
mnli_mismatched/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ae79dd1ea4a4eefa96309034a94071bdac58340fd98581fbb303afa3b157b87
3
+ size 1244865
mrpc/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0bc4e66455101cc82afe92efe3cd387ebacb92d1585e05aa6eaa24068f58fffb
3
+ size 306799
mrpc/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e894b7fc5f0cdb3caa270b40089a6d4776e4dad966359109ca3c7dfec4153dd
3
+ size 646414
dummy/mnli/1.0.0/dummy_data.zip → mrpc/validation-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:ef46936124ebde31577df53b2ae6e381aa9c66e95a2cf50f42ba68478ec3896e
3
- size 5438
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6cc82c8051a9d41b5b4c57fe7f1ed1bda8a1fbd0febb620724c348167c907cf9
3
+ size 74899
qnli/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6433b32f6a0bc104f023eab938d9a087f7776af8a90a82beb36d0f5ebde84411
3
+ size 872949
qnli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31a832f5fe31b9944a643bcb47f8c34efc5e770573ccb41d5c81e5ed0d71c816
3
+ size 17449932
qnli/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:868ef7832318910590393e9376088710be6f9f2eeb1d77f1e890b8e174c51a87
3
+ size 867862
qqp/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d886a8ca34b9fe150242adb939f687cf3848a203b385cdd218595e05031b5373
3
+ size 36440953
qqp/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1bbbfa05598e7cea0a6be2eb7272313eb5a8714a31736ab849b818f929b781f1
3
+ size 33327465
qqp/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3b8bada5edeed8e6e81bb8aade05badd2195475eb7fca315c1dcdc0e710a866
3
+ size 3703670
rte/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6cad83b691b7df6b73738a91111a0e196a294ab3988b3fb261a0bc7a455af0d
3
+ size 618851
rte/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:adc4c0a0252a64a75b101f5e73bd5c1511864580763ddd4fb48c429e59f2dde2
3
+ size 580730
rte/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c0487bc75ae68a5d7458807b57973c656ae9c47c64a114a8b01002226fddf4a
3
+ size 67569
sst2/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d19212aaf09eca70f0bb8766ca4556bd54291b14ded2b4fc0efc2d74ebfd5cab
3
+ size 146921
sst2/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03c9228cab12cf54f2727ec20e269de359d6cf29511eba3b879c52b746681f45
3
+ size 3085870
sst2/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ca1fcdf1e78bf5325982bbbdaebe432d96727ff1c41eb27c462407fa100e13c
3
+ size 72372
stsb/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0c58c00eebf43f70cd4668fadadf5e541415bd9fb95809fc958690d8eeea07c
3
+ size 113005
stsb/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e63fac541b6227ea1e7a3335f49c1772dcc8d58a953cb3a8f1c4e6c0daf2db2
3
+ size 498786
stsb/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9495d89f7272d26d28e2d446c962258a499cf38a26df0925b4823cf992f7808a
3
+ size 149444
wnli/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:39f6607b5400cfc068ab613dd2ceadbdddf2c85aa56b8694d602e4612a170cab
3
+ size 12837
wnli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69539c43069eb3f052f8b86abe7ca2cd4f23645e152f044e7d19d1e107458588
3
+ size 38011
wnli/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3945356789e3b1b488f5469beb230abd2c5d4b79ec3319048ce63ab405eb14eb
3
+ size 10402
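Note that the three-line blobs shown for each added shard are Git LFS pointers (version, oid, size), not the Parquet bytes themselves; the actual files are resolved through LFS when downloaded from the Hub. A hedged sketch of fetching one shard and inspecting it without the `datasets` library, again with a placeholder repository id:

```python
# Sketch: download a single Parquet shard from the Hub and read it with pandas.
import pandas as pd
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="glue",  # placeholder -- substitute this repository's id on the Hub
    filename="wnli/validation-00000-of-00001.parquet",
    repo_type="dataset",
)

df = pd.read_parquet(path)   # requires pyarrow (or fastparquet) to be installed
print(df.columns.tolist())   # expected columns: sentence1, sentence2, label, idx
print(len(df), "rows")
```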