Datasets:

Languages:
English
License:
gabrielaltay committed on
Commit
21623b9
·
1 Parent(s): b10b8ae

upload hubscripts/bioasq_task_c_2017_hub.py to hub from bigbio repo

Browse files
Files changed (1) hide show
  1. bioasq_task_c_2017.py +220 -0
bioasq_task_c_2017.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
import json
import os
import xml.etree.ElementTree as ET
from dataclasses import dataclass
from typing import List

import datasets

from .bigbiohub import BigBioConfig
from .bigbiohub import Tasks
from .bigbiohub import text_features
27
+
28
+ _LANGUAGES = ['English']
29
+ _PUBMED = True
30
+ _LOCAL = True
31
+ _CITATION = """\
32
+ @article{nentidis-etal-2017-results,
33
+ title = {Results of the fifth edition of the {B}io{ASQ} Challenge},
34
+ author = {
35
+ Nentidis, Anastasios and Bougiatiotis, Konstantinos and Krithara,
36
+ Anastasia and Paliouras, Georgios and Kakadiaris, Ioannis
37
+ },
38
+ year = 2007,
39
+ journal = {},
40
+ volume = {BioNLP 2017},
41
+ doi = {10.18653/v1/W17-2306},
42
+ url = {https://aclanthology.org/W17-2306},
43
+ biburl = {},
44
+ bibsource = {https://aclanthology.org/W17-2306}
45
+ }
46
+
47
+ """
48
+
49
+ _DATASETNAME = "bioasq_task_c_2017"
50
+ _DISPLAYNAME = "BioASQ Task C 2017"
51
+
52
+ _DESCRIPTION = """\
53
+ The training data set for this task contains annotated biomedical articles
54
+ published in PubMed and corresponding full text from PMC. By annotated is meant
55
+ that GrantIDs and corresponding Grant Agencies have been identified in the full
56
+ text of articles
57
+ """
58
+
59
+ _HOMEPAGE = "http://participants-area.bioasq.org/general_information/Task5c/"
60
+
61
+ _LICENSE = 'National Library of Medicine Terms and Conditions'
62
+
63
+ # Website contains all data, but login required
64
+ _URLS = {_DATASETNAME: "http://participants-area.bioasq.org/datasets/"}
65
+
66
# Grant-agency identification is modeled as text classification in BigBio.
_SUPPORTED_TASKS = [Tasks.TEXT_CLASSIFICATION]

# Version of the original (source) data release.
_SOURCE_VERSION = "1.0.0"
# Version of the BigBio-harmonized schema for this dataset.
_BIGBIO_VERSION = "1.0.0"
70
+
71
+
72
@dataclass
class BioASQTaskC2017BigBioConfig(BigBioConfig):
    """BuilderConfig for BioASQ Task C 2017.

    Defaults select the source schema; BUILDER_CONFIGS overrides these
    fields for the bigbio_text variant.
    """

    # Which feature schema to emit: "source" or "bigbio_text".
    schema: str = "source"
    # Full config name as registered with `datasets`.
    name: str = "bioasq_task_c_2017_source"
    version: datasets.Version = datasets.Version(_SOURCE_VERSION)
    description: str = "bioasq_task_c_2017 source schema"
    # Identifier of the underlying dataset (shared by both schema variants).
    subset_id: str = "bioasq_task_c_2017"
79
+
80
+
81
class BioASQTaskC2017(datasets.GeneratorBasedBuilder):
    """BioASQ Task C Dataset for 2017.

    Annotated biomedical articles (PubMed/PMC full text) in which GrantIDs
    and the corresponding grant agencies have been identified.
    """

    DEFAULT_CONFIG_NAME = "bioasq_task_c_2017_source"
    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)

    BUILDER_CONFIGS = [
        BioASQTaskC2017BigBioConfig(
            name="bioasq_task_c_2017_source",
            version=SOURCE_VERSION,
            description="bioasq_task_c_2017 source schema",
            schema="source",
            subset_id="bioasq_task_c_2017",
        ),
        BioASQTaskC2017BigBioConfig(
            name="bioasq_task_c_2017_bigbio_text",
            version=BIGBIO_VERSION,
            description="bioasq_task_c_2017 BigBio schema",
            schema="bigbio_text",
            subset_id="bioasq_task_c_2017",
        ),
    ]

    BUILDER_CONFIG_CLASS = BioASQTaskC2017BigBioConfig

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with features matching the selected schema."""
        # BioASQ Task C source schema
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "document_id": datasets.Value("string"),
                    "pmid": datasets.Value("string"),
                    "pmcid": datasets.Value("string"),
                    "grantList": [
                        {
                            "agency": datasets.Value("string"),
                        }
                    ],
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.schema == "bigbio_text":
            # Shared BigBio text-classification features from bigbiohub.
            # (Fixed: the original `text.features` referenced a name that
            # could never be imported; the bigbiohub symbol is `text_features`.)
            features = text_features
        else:
            # Previously `features` was left unbound here, producing an
            # opaque NameError; fail loudly instead.
            raise ValueError(f"Unsupported schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=str(_LICENSE),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Map the user-supplied data_dir onto the train/test annotation files.

        Raises:
            ValueError: if no ``data_dir`` was passed (the data is local-only).
        """
        if self.config.data_dir is None:
            raise ValueError(
                "This is a local dataset. Please pass the data_dir kwarg to load_dataset."
            )
        data_dir = self.config.data_dir

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "taskCTrainingData2017.json"),
                    "filespath": os.path.join(data_dir, "Train_Text"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "taskc_golden2.json"),
                    "filespath": os.path.join(data_dir, "Final_Text"),
                    "split": "test",
                },
            ),
        ]

    @staticmethod
    def _read_article_xml(filespath, pmcid):
        """Return the raw XML full text for one PMC article.

        PMC exports are UTF-8; the encoding is explicit so platform-default
        decoding cannot corrupt the text.
        """
        with open(os.path.join(filespath, pmcid + ".xml"), encoding="utf-8") as f:
            return f.read()

    def _generate_examples(self, filepath, filespath, split):
        """Yield (key, example) pairs for the configured schema.

        Args:
            filepath: JSON annotation file listing articles (pmid, pmcid, grantList).
            filespath: directory containing one ``<pmcid>.xml`` full-text file per article.
            split: split name (unused; both schemas handle splits identically).
        """
        with open(filepath, encoding="utf-8") as f:
            task_data = json.load(f)

        if self.config.schema == "source":
            for article in task_data["articles"]:
                text = self._read_article_xml(filespath, article["pmcid"])
                pmid = article["pmid"]

                yield pmid, {
                    "text": text,
                    "document_id": pmid,
                    "id": str(pmid),
                    "pmid": pmid,
                    "pmcid": article["pmcid"],
                    "grantList": [
                        {"agency": grant["agency"]} for grant in article["grantList"]
                    ],
                }

        elif self.config.schema == "bigbio_text":
            for article in task_data["articles"]:
                xml_string = self._read_article_xml(filespath, article["pmcid"])

                try:
                    article_body = ET.fromstring(xml_string).find("./article/body")
                except ET.ParseError:
                    # PubMed XML might not contain namespace which results in
                    # parse error; add the declarations manually.
                    xml_string = xml_string.replace(
                        "</pmc-articleset>",
                        # xlink namespace
                        '<article xmlns:xlink="http://www.w3.org/1999/xlink"'  # mml namespace
                        ' xmlns:mml="http://www.w3.org/1998/Math/MathML"'
                        ' article-type="research-article">',
                    )
                    xml_string = xml_string + "</article></pmc-articleset>"
                    article_body = ET.fromstring(xml_string).find("./article/body")

                # NOTE(review): ET.tostring(..., method="text") returns bytes;
                # kept as-is to preserve the original emitted values.
                text = ET.tostring(article_body, encoding="utf8", method="text")

                yield article["pmid"], {
                    "text": text,
                    "id": str(article["pmid"]),
                    "document_id": article["pmid"],
                    "labels": [grant["agency"] for grant in article["grantList"]],
                }