Datasets:

Modalities:
Text
Formats:
parquet
Languages:
code
Size:
< 1K
ArXiv:
Tags:
code
Libraries:
Datasets
pandas
License:
arjunguha committed on
Commit
7c7dd83
·
verified ·
1 Parent(s): 6f18c8d

Delete loading script

Browse files
Files changed (1) hide show
  1. humanevalpack.py +0 -151
humanevalpack.py DELETED
@@ -1,151 +0,0 @@
1
- import json
2
-
3
- import datasets
4
-
5
-
6
# Human-readable dataset description. Left empty here; the Hub dataset
# card carries the real description.
_DESCRIPTION = """
"""

# Homepage of the OctoPack / HumanEvalPack release this data belongs to.
_HOMEPAGE = "https://github.com/bigcode-project/octopack"
10
-
11
def get_url(name):
    """Return the repo-relative path of the JSONL data file for language *name*."""
    return f"data/{name}/data/humanevalpack.jsonl"
14
-
15
def split_generator(dl_manager, name):
    """Download the data file for language *name* and expose it as one TEST split."""
    filepath = dl_manager.download(get_url(name))
    test_split = datasets.SplitGenerator(
        name=datasets.Split.TEST,
        gen_kwargs={"filepath": filepath},
    )
    return [test_split]
25
-
26
class HumanEvalPackConfig(datasets.BuilderConfig):
    """BuilderConfig for one HumanEvalPack language.

    Args:
        name: Short language identifier (e.g. ``"python"``, ``"go"``).
        description: Human-readable description of this config.
        features: Ordered list of column names present in the JSONL rows.
        **kwargs: Forwarded unchanged to ``datasets.BuilderConfig``.
    """

    def __init__(self, name, description, features, **kwargs):
        # Pin the config version; Python-3 bare super() replaces the
        # legacy super(HumanEvalPackConfig, self) form.
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.name = name
        self.description = description
        self.features = features
34
-
35
-
36
# Column set shared by every HumanEvalPack language config.
_FEATURES = [
    "task_id", "prompt", "declaration", "canonical_solution",
    "buggy_solution", "bug_type", "failure_symptoms", "import",
    "test_setup", "test", "example_test", "entry_point", "signature",
    "docstring", "instruction",
]

# (config name, display name) pairs; replaces six hand-copied
# HumanEvalPackConfig literals that differed only in these two strings.
_LANGUAGES = [
    ("python", "Python"),
    ("js", "JavaScript"),
    ("java", "Java"),
    ("go", "Go"),
    ("cpp", "C++"),
    ("rust", "Rust"),
]


class HumanEvalPack(datasets.GeneratorBasedBuilder):
    """HumanEvalPack loader: one config per language, each a single TEST
    split read from a JSONL file with an identical string-column schema."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        HumanEvalPackConfig(
            name=lang,
            description=f"{label} HumanEvalPack",
            features=_FEATURES,
        )
        for lang, label in _LANGUAGES
    ]
    DEFAULT_CONFIG_NAME = "python"

    def _info(self):
        """Declare the feature schema (all string columns, same for every config)."""
        # Column order kept exactly as the original hand-written dict
        # (note: it differs from the _FEATURES list order).
        column_order = (
            "task_id", "prompt", "declaration", "canonical_solution",
            "buggy_solution", "bug_type", "failure_symptoms", "entry_point",
            "import", "test_setup", "test", "example_test", "signature",
            "docstring", "instruction",
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {name: datasets.Value("string") for name in column_order}
            ),
            homepage=_HOMEPAGE,
        )

    def _split_generators(self, dl_manager):
        """Download this config's data file and return its single TEST split.

        Every language is laid out identically on disk, so the original
        six-way if/elif chain (each branch returning the same call, and
        silently returning None for unknown names) is a single call.
        """
        return split_generator(dl_manager, self.config.name)

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs, one per JSONL line in *filepath*.

        Keys only need to be unique; enumerate fixes the original's
        double `key += 1` per row, which produced keys 1, 3, 5, ...
        """
        with open(filepath) as f:
            for key, line in enumerate(f):
                row = json.loads(line)
                yield key, {
                    "task_id": row["task_id"],
                    "prompt": row["prompt"],
                    "declaration": row["declaration"],
                    "canonical_solution": row["canonical_solution"],
                    "buggy_solution": row["buggy_solution"],
                    "bug_type": row["bug_type"],
                    "failure_symptoms": row["failure_symptoms"],
                    "import": row.get("import", ""),  # Only for Go
                    "test_setup": row.get("test_setup", ""),  # Only for Go
                    "test": row["test"],
                    "example_test": row["example_test"],
                    "entry_point": row["entry_point"],
                    "signature": row["signature"],
                    "docstring": row["docstring"],
                    "instruction": row["instruction"],
                }