Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
Commit 2dc8f0b (verified) by SaylorTwift (HF Staff) · 1 parent: 7d775d2

Add 'business_ethics' config data files
README.md CHANGED
@@ -150,6 +150,8 @@ dataset_info:
   features:
   - name: question
     dtype: string
+  - name: subject
+    dtype: string
   - name: choices
     sequence: string
   - name: answer
@@ -162,19 +164,19 @@ dataset_info:
         '3': D
   splits:
   - name: auxiliary_train
-    num_bytes: 160601377
+    num_bytes: 161000625
     num_examples: 99842
   - name: test
-    num_bytes: 33252
+    num_bytes: 35140
     num_examples: 100
   - name: validation
-    num_bytes: 3038
+    num_bytes: 3235
     num_examples: 11
   - name: dev
-    num_bytes: 2190
+    num_bytes: 2273
     num_examples: 5
-  download_size: 166184960
-  dataset_size: 160639857
+  download_size: 47193421
+  dataset_size: 161041273
 - config_name: clinical_knowledge
   features:
   - name: question
@@ -1753,6 +1755,16 @@ configs:
     path: astronomy/validation-*
   - split: dev
     path: astronomy/dev-*
+- config_name: business_ethics
+  data_files:
+  - split: auxiliary_train
+    path: business_ethics/auxiliary_train-*
+  - split: test
+    path: business_ethics/test-*
+  - split: validation
+    path: business_ethics/validation-*
+  - split: dev
+    path: business_ethics/dev-*
 ---
 
 # Dataset Card for MMLU
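The net effect on the README is a new 'subject' string column in the business_ethics schema and a 'business_ethics' entry under configs: that maps each split to its parquet shards. As a minimal sketch, this is how the new config would be loaded with the datasets library; the repository id is not shown in this commit view, so "cais/mmlu" below is an assumption (substitute the actual repo id):

from datasets import load_dataset

# NOTE: "cais/mmlu" is an assumed repo id; the diff does not name the repository.
ds = load_dataset("cais/mmlu", "business_ethics")

print(ds["test"].num_rows)      # 100, matching the updated split metadata
print(ds["test"].features)      # question, subject, choices, answer
print(ds["dev"][0]["subject"])  # the string column added in this commit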
business_ethics/auxiliary_train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2782fc860f57d9345a9233ab04f494b0af5ae85b893a27853f7014b14a3bd07
+size 47163955

business_ethics/dev-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:809e98c869462d507c867b00c3232d9bfc31affb345c0a53adf2fc4568967296
+size 4221

business_ethics/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99d34fcaf6a31f7462bf3fad2432903796a2f0672467c0cfde6e8608d4f10faf
+size 20701

business_ethics/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2bb543d985898fd2630a67f02a449e5d3308ed91bc3ba52374ef74cab013cf0d
+size 4544
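Each of the four files above is a Git LFS pointer rather than the parquet data itself: "oid" is the sha256 of the actual file and "size" its byte count. The four sizes sum exactly to the new download_size recorded in the README diff (47163955 + 4221 + 20701 + 4544 = 47193421). A short sketch of verifying a downloaded shard against its pointer; the local path is illustrative:

import hashlib

def lfs_sha256(path, chunk_size=1 << 20):
    # Stream in 1 MiB chunks so large shards need not fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected oid copied from the dev split's pointer above.
expected = "809e98c869462d507c867b00c3232d9bfc31affb345c0a53adf2fc4568967296"
assert lfs_sha256("business_ethics/dev-00000-of-00001.parquet") == expected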
dataset_infos.json CHANGED
@@ -217,39 +217,34 @@
     "features": {
       "question": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
+      "subject": {
+        "dtype": "string",
+        "_type": "Value"
+      },
       "choices": {
         "feature": {
           "dtype": "string",
-          "id": null,
           "_type": "Value"
         },
-        "length": -1,
-        "id": null,
         "_type": "Sequence"
       },
       "answer": {
-        "num_classes": 4,
         "names": [
           "A",
           "B",
           "C",
           "D"
         ],
-        "id": null,
         "_type": "ClassLabel"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "task_templates": null,
-    "builder_name": "mmlu",
+    "builder_name": "parquet",
+    "dataset_name": "mmlu",
     "config_name": "business_ethics",
     "version": {
       "version_str": "1.0.0",
-      "description": null,
       "major": 1,
       "minor": 0,
       "patch": 0
@@ -257,39 +252,32 @@
     "splits": {
       "auxiliary_train": {
         "name": "auxiliary_train",
-        "num_bytes": 160601257,
+        "num_bytes": 161000625,
         "num_examples": 99842,
-        "dataset_name": "mmlu"
+        "dataset_name": null
       },
      "test": {
         "name": "test",
-        "num_bytes": 33240,
+        "num_bytes": 35140,
         "num_examples": 100,
-        "dataset_name": "mmlu"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 3026,
+        "num_bytes": 3235,
         "num_examples": 11,
-        "dataset_name": "mmlu"
+        "dataset_name": null
       },
       "dev": {
         "name": "dev",
-        "num_bytes": 2178,
+        "num_bytes": 2273,
         "num_examples": 5,
-        "dataset_name": "mmlu"
-      }
-    },
-    "download_checksums": {
-      "data.tar": {
-        "num_bytes": 166184960,
-        "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b"
+        "dataset_name": null
       }
     },
-    "download_size": 166184960,
-    "post_processing_size": null,
-    "dataset_size": 160639701,
-    "size_in_bytes": 326824661
+    "download_size": 47193421,
+    "dataset_size": 161041273,
+    "size_in_bytes": 208234694
   },
   "clinical_knowledge": {
     "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n",