lhoestq (HF Staff) committed
Commit d4777a0 · verified · 1 Parent(s): fa92a28

Add 'sst2' config data files
README.md CHANGED
@@ -24,7 +24,7 @@ task_ids:
 paperswithcode_id: glue
 pretty_name: GLUE (General Language Understanding Evaluation benchmark)
 dataset_info:
-  config_name: cola
+- config_name: cola
   features:
   - name: sentence
     dtype: string
@@ -48,6 +48,30 @@ dataset_info:
     num_examples: 1063
   download_size: 322394
   dataset_size: 605704
+- config_name: sst2
+  features:
+  - name: sentence
+    dtype: string
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': negative
+          '1': positive
+  - name: idx
+    dtype: int32
+  splits:
+  - name: train
+    num_bytes: 4681603
+    num_examples: 67349
+  - name: validation
+    num_bytes: 106252
+    num_examples: 872
+  - name: test
+    num_bytes: 216640
+    num_examples: 1821
+  download_size: 3305163
+  dataset_size: 5004495
 configs:
 - config_name: cola
   data_files:
@@ -57,6 +81,14 @@ configs:
     path: cola/validation-*
   - split: test
     path: cola/test-*
+- config_name: sst2
+  data_files:
+  - split: train
+    path: sst2/train-*
+  - split: validation
+    path: sst2/validation-*
+  - split: test
+    path: sst2/test-*
 train-eval-index:
 - config: cola
   task: text-classification
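With the new `sst2` entry under `configs:`, the standard `datasets` loader can resolve the sst2 parquet shards by split. A minimal sketch, assuming a placeholder repository id (the actual repo id is not shown in this diff):

```python
from datasets import load_dataset

# Placeholder -- substitute the id of this dataset repository.
REPO_ID = "glue"

# The `configs:` entry added above maps the "sst2" config to its data files:
#   sst2/train-*, sst2/validation-*, sst2/test-*
ds = load_dataset(REPO_ID, "sst2")

print(ds)                                    # DatasetDict with train/validation/test splits
print(ds["train"][0])                        # {'sentence': ..., 'label': ..., 'idx': ...}
print(ds["train"].features["label"].names)   # ['negative', 'positive']
```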
dataset_infos.json CHANGED
@@ -57,34 +57,28 @@
   },
   "sst2": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
-    "citation": "@inproceedings{socher2013recursive,\n title={Recursive deep models for semantic compositionality over a sentiment treebank},\n author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},\n booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},\n pages={1631--1642},\n year={2013}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n\nNote that each GLUE dataset has its own citation. Please see the source to see\nthe correct citation for each contained dataset.",
-    "homepage": "https://nlp.stanford.edu/sentiment/index.html",
+    "citation": "@inproceedings{socher2013recursive,\n title={Recursive deep models for semantic compositionality over a sentiment treebank},\n author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},\n booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},\n pages={1631--1642},\n year={2013}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
+    "homepage": "https://datasets.stanford.edu/sentiment/index.html",
     "license": "",
     "features": {
       "sentence": {
         "dtype": "string",
-        "id": null,
         "_type": "Value"
       },
       "label": {
-        "num_classes": 2,
         "names": [
           "negative",
           "positive"
         ],
-        "names_file": null,
-        "id": null,
         "_type": "ClassLabel"
       },
       "idx": {
         "dtype": "int32",
-        "id": null,
         "_type": "Value"
       }
     },
-    "post_processed": null,
-    "supervised_keys": null,
-    "builder_name": "glue",
+    "builder_name": "glue-ci",
+    "dataset_name": "glue-ci",
     "config_name": "sst2",
     "version": {
       "version_str": "1.0.0",
@@ -94,35 +88,28 @@
       "patch": 0
     },
     "splits": {
-      "test": {
-        "name": "test",
-        "num_bytes": 217556,
-        "num_examples": 1821,
-        "dataset_name": "glue"
-      },
       "train": {
         "name": "train",
-        "num_bytes": 4715283,
+        "num_bytes": 4681603,
         "num_examples": 67349,
-        "dataset_name": "glue"
+        "dataset_name": null
       },
       "validation": {
         "name": "validation",
-        "num_bytes": 106692,
+        "num_bytes": 106252,
         "num_examples": 872,
-        "dataset_name": "glue"
-      }
-    },
-    "download_checksums": {
-      "https://dl.fbaipublicfiles.com/glue/data/SST-2.zip": {
-        "num_bytes": 7439277,
-        "checksum": "d67e16fb55739c1b32cdce9877596db1c127dc322d93c082281f64057c16deaa"
+        "dataset_name": null
+      },
+      "test": {
+        "name": "test",
+        "num_bytes": 216640,
+        "num_examples": 1821,
+        "dataset_name": null
       }
     },
-    "download_size": 7439277,
-    "post_processing_size": null,
-    "dataset_size": 5039531,
-    "size_in_bytes": 12478808
+    "download_size": 3305163,
+    "dataset_size": 5004495,
+    "size_in_bytes": 8309658
   },
   "mrpc": {
     "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
sst2/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d19212aaf09eca70f0bb8766ca4556bd54291b14ded2b4fc0efc2d74ebfd5cab
+size 146921
sst2/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:03c9228cab12cf54f2727ec20e269de359d6cf29511eba3b879c52b746681f45
+size 3085870
sst2/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ca1fcdf1e78bf5325982bbbdaebe432d96727ff1c41eb27c462407fa100e13c
+size 72372
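Each of the files added above is a Git LFS pointer to a parquet shard, so a single split can also be read directly, e.g. with pandas over the `hf://` filesystem (requires `huggingface_hub` to be installed). A sketch, again with a placeholder repo id:

```python
import pandas as pd

# Placeholder path -- substitute the id of this dataset repository.
df = pd.read_parquet("hf://datasets/glue/sst2/validation-00000-of-00001.parquet")

print(len(df))               # 872 rows expected for the validation split
print(df.columns.tolist())   # ['sentence', 'label', 'idx']
```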