lhoestq (HF Staff) committed
Commit 2f8f549 · verified · 1 Parent(s): d4777a0

Add 'mrpc' config data files

README.md CHANGED
@@ -48,6 +48,32 @@ dataset_info:
     num_examples: 1063
   download_size: 322394
   dataset_size: 605704
+- config_name: mrpc
+  features:
+  - name: sentence1
+    dtype: string
+  - name: sentence2
+    dtype: string
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': not_equivalent
+          '1': equivalent
+  - name: idx
+    dtype: int32
+  splits:
+  - name: train
+    num_bytes: 943843
+    num_examples: 3668
+  - name: validation
+    num_bytes: 105879
+    num_examples: 408
+  - name: test
+    num_bytes: 442410
+    num_examples: 1725
+  download_size: 1028112
+  dataset_size: 1492132
 - config_name: sst2
   features:
   - name: sentence
@@ -81,6 +107,14 @@ configs:
     path: cola/validation-*
   - split: test
     path: cola/test-*
+- config_name: mrpc
+  data_files:
+  - split: train
+    path: mrpc/train-*
+  - split: validation
+    path: mrpc/validation-*
+  - split: test
+    path: mrpc/test-*
 - config_name: sst2
   data_files:
   - split: train
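With the new `mrpc` entries under `dataset_info` and `configs`, the parquet shards under `mrpc/` become loadable as their own config. A minimal sketch with the `datasets` library, assuming a placeholder repository id (this commit does not name the repo):

```python
# Sketch: load the newly added 'mrpc' config.
# "<namespace>/<dataset-repo>" is a placeholder; substitute the actual repo id.
from datasets import load_dataset

mrpc = load_dataset("<namespace>/<dataset-repo>", "mrpc")

# Split sizes declared in the README metadata above:
# train=3668, validation=408, test=1725 examples.
print({split: ds.num_rows for split, ds in mrpc.items()})
```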
dataset_infos.json CHANGED
@@ -113,39 +113,32 @@
   },
   "mrpc": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
-    "citation": "@inproceedings{dolan2005automatically,\n title={Automatically constructing a corpus of sentential paraphrases},\n author={Dolan, William B and Brockett, Chris},\n booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},\n year={2005}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
+    "citation": "@inproceedings{dolan2005automatically,\n title={Automatically constructing a corpus of sentential paraphrases},\n author={Dolan, William B and Brockett, Chris},\n booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},\n year={2005}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
     "homepage": "https://www.microsoft.com/en-us/download/details.aspx?id=52398",
     "license": "",
     "features": {
       "sentence1": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "sentence2": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "label": {
-        "num_classes": 2,
         "names": [
           "not_equivalent",
           "equivalent"
         ],
-        "names_file": null,
-        "id": null,
         "_type": "ClassLabel"
       },
       "idx": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "builder_name": "glue",
+    "builder_name": "glue-ci",
+    "dataset_name": "glue-ci",
     "config_name": "mrpc",
     "version": {
       "version_str": "1.0.0",
@@ -155,43 +148,28 @@
       "patch": 0
     },
     "splits": {
-      "test": {
-        "name": "test",
-        "num_bytes": 443498,
-        "num_examples": 1725,
-        "dataset_name": "glue"
-      },
       "train": {
         "name": "train",
-        "num_bytes": 946146,
+        "num_bytes": 943843,
         "num_examples": 3668,
-        "dataset_name": "glue"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 106142,
+        "num_bytes": 105879,
         "num_examples": 408,
-        "dataset_name": "glue"
-      }
-    },
-    "download_checksums": {
-      "https://dl.fbaipublicfiles.com/glue/data/mrpc_dev_ids.tsv": {
-        "num_bytes": 6222,
-        "checksum": "971d7767d81b997fd9060ade0ec23c4fc31cbb226a55d1bd4a1bac474eb81dc7"
-      },
-      "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_train.txt": {
-        "num_bytes": 1047044,
-        "checksum": "60a9b09084528f0673eedee2b69cb941920f0b8cd0eeccefc464a98768457f89"
-      },
-      "https://dl.fbaipublicfiles.com/senteval/senteval_data/msr_paraphrase_test.txt": {
-        "num_bytes": 441275,
-        "checksum": "a04e271090879aaba6423d65b94950c089298587d9c084bf9cd7439bd785f784"
+        "dataset_name": null
+      },
+      "test": {
+        "name": "test",
+        "num_bytes": 442410,
+        "num_examples": 1725,
+        "dataset_name": null
       }
     },
-    "download_size": 1494541,
-    "post_processing_size": null,
-    "dataset_size": 1495786,
-    "size_in_bytes": 2990327
+    "download_size": 1028112,
+    "dataset_size": 1492132,
+    "size_in_bytes": 2520244
   },
   "qqp": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
mrpc/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bc4e66455101cc82afe92efe3cd387ebacb92d1585e05aa6eaa24068f58fffb
+size 306799
mrpc/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e894b7fc5f0cdb3caa270b40089a6d4776e4dad966359109ca3c7dfec4153dd
+size 646414
mrpc/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6cc82c8051a9d41b5b4c57fe7f1ed1bda8a1fbd0febb620724c348167c907cf9
+size 74899
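The three committed files are Git LFS pointers, so the actual parquet payloads live in LFS storage. A hedged sketch of verifying a fetched shard against the declared split sizes, assuming the LFS objects have been pulled so the paths below contain real parquet data rather than pointer files:

```python
# Sketch: compare a fetched parquet shard with the num_examples declared above.
import pyarrow.parquet as pq

train = pq.read_table("mrpc/train-00000-of-00001.parquet")
assert train.num_rows == 3668  # train split size from the README/dataset_infos metadata
print(train.schema)            # expected columns: sentence1, sentence2, label, idx
```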