SaylorTwift (HF Staff) committed
Commit b28b82f · verified · 1 Parent(s): cf1e17c

Delete loading script

Files changed (1)
  1. me_q_sum.py +0 -122
me_q_sum.py DELETED
@@ -1,122 +0,0 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MeQSum: a corpus of 1,000 summarized consumer health questions."""


import textwrap

import datasets

# BibTeX citation
_CITATION = """\
@Inproceedings{MeQSum,
    author = {Asma {Ben Abacha} and Dina Demner-Fushman},
    title = {On the Summarization of Consumer Health Questions},
    booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, ACL 2019,
                 Florence, Italy, July 28th - August 2},
    year = {2019},
    abstract = {Question understanding is one of the main challenges in question answering. In real world applications,
                users often submit natural language questions that are longer than needed and include peripheral
                information that increases the complexity of the question, leading to substantially more false positives
                in answer retrieval. In this paper, we study neural abstractive models for medical question
                summarization. We introduce the MeQSum corpus of 1,000 summarized consumer health questions. We explore
                data augmentation methods and evaluate state-of-the-art neural abstractive models on this new task. In
                particular, we show that semantic augmentation from question datasets improves the overall performance,
                and that pointer-generator networks outperform sequence-to-sequence attentional models on this task,
                with a ROUGE-1 score of 44.16%. We also present a detailed error analysis and discuss directions for
                improvement that are specific to question summarization.}}
"""

# Official description of the dataset
_DESCRIPTION = textwrap.dedent(
    """
    From "On the Summarization of Consumer Health Questions" (Abacha et al.), MeQSum is a corpus of 1,000 summarized
    consumer health questions.

    The following is an example from the dataset:

    Question:
    SUBJECT: inversion of long arm chromasome7 MESSAGE: My son has been diagnosed with inversion of long arm
    chromasome 7 and down syndrome . please could you give me information on the chromasome 7 please because
    our doctors have not yet mentioned it

    Summary:
    Where can I find information on chromosome 7?
    """
)

# Link to an official homepage for the dataset here
_HOMEPAGE = "https://worksheets.codalab.org/rest/bundles/0xd98a53314314445b96b4d703bb2d8c8c/contents/blob/"

_LICENSE = ""

class MeQSum(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=datasets.Version("1.0.0"), description=_DESCRIPTION)
    ]

    def _info(self):
        features = datasets.Features(
            {
                "query": datasets.Value("string"),
                "answer": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description="MeQSum dataset, as preprocessed and shuffled in HELM",
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Each split is a pair of line-aligned text files: the .source files hold
        # the full consumer questions, the .target files the reference summaries.
        test_source = dl_manager.download("test.source")
        test_target = dl_manager.download("test.target")
        train_source = dl_manager.download("train.source")
        train_target = dl_manager.download("train.target")
        val_source = dl_manager.download("val.source")
        val_target = dl_manager.download("val.target")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"target": train_target, "source": train_source},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"target": val_target, "source": val_source},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"target": test_target, "source": test_source},
            ),
        ]

    # Method parameters are unpacked from `gen_kwargs` as given in `_split_generators`.
    def _generate_examples(self, source, target):
        # Line i of `source` is paired with line i of `target`.
        with open(source, encoding="utf-8") as f_source, open(target, encoding="utf-8") as f_target:
            for idx, (s, t) in enumerate(zip(f_source, f_target)):
                # Strip the trailing newline that line iteration leaves on each field.
                yield idx, {"query": s.strip(), "answer": t.strip()}
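
With the loading script removed, the dataset is intended to be loaded directly from the data files hosted in the repo. A minimal sketch, assuming the repo id lighteval/me_q_sum (the diff does not name the repository) and that the repo's data is served in a directly loadable format (e.g. the Hub's automatic Parquet conversion) with the same query/answer columns:

    from datasets import load_dataset

    # Hypothetical repo id; the commit diff does not name the repository.
    ds = load_dataset("lighteval/me_q_sum")

    example = ds["train"][0]
    print(example["query"])   # full consumer health question
    print(example["answer"])  # its one-line summary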