Datasets:

Languages:
Thai
ArXiv:
License:
holylovenia committed on
Commit
fbfeccc
·
verified ·
1 Parent(s): 8079c69

Upload wongnai_reviews.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. wongnai_reviews.py +116 -0
wongnai_reviews.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import csv
import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

# no BibTeX citation
_CITATION = ""

# Canonical dataset identifier; used to build config names and as the _URLS key.
_DATASETNAME = "wongnai_reviews"

_DESCRIPTION = """
Wongnai features over 200,000 restaurants, beauty salons, and spas across Thailand on its platform, with detailed
information about each merchant and user reviews. Its over two million registered users can search for what’s top rated
in Bangkok, follow their friends, upload photos, and do quick write-ups about the places they visit. Each write-up
(review) also comes with a rating score ranging from 1-5 stars. The task here is to create a rating prediction model
using only textual information.
"""

# Upstream dataset card on the Hugging Face Hub.
_HOMEPAGE = "https://huggingface.co/datasets/wongnai_reviews"

# Language code for Thai (presumably ISO 639-3 — the SEACrowd convention).
_LANGUAGES = ["tha"]

_LICENSE = Licenses.LGPL_3_0.value

# Data is fetched from a public URL; this is not a local-only dataset.
_LOCAL = False

# Zip archive expected to contain w_review_train.csv and w_review_test.csv
# (see _split_generators below).
_URLS = {_DATASETNAME: "https://archive.org/download/wongnai_reviews/wongnai_reviews_withtest.zip"}

_SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"

# Star-rating label names (1-5), shared by both the source ClassLabel feature
# and the seacrowd_text schema.
_CLASSES = ["1", "2", "3", "4", "5"]
44
class WongnaiReviewsDataset(datasets.GeneratorBasedBuilder):
    """WongnaiReviews consists reviews for over 200,000 restaurants, beauty salons, and spas across Thailand.

    Each review carries a 1-5 star rating; the dataset targets rating
    prediction from the review text alone. Two configs are exposed: the
    ``source`` schema (review_body + star_rating ClassLabel) and the
    ``seacrowd_text`` schema (id/text/label).
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_seacrowd_text",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema="seacrowd_text",
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with features matching the selected schema.

        Raises:
            ValueError: if ``self.config.schema`` is not one of the supported
                schemas. (Previously an unknown schema fell through and raised
                an opaque NameError on the unbound ``features`` variable.)
        """
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "review_body": datasets.Value("string"),
                    "star_rating": datasets.ClassLabel(names=_CLASSES),
                }
            )
        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(label_names=_CLASSES)
        else:
            raise ValueError(f"Unsupported schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download and extract the archive, then declare train/test splits."""
        data_dir = dl_manager.download_and_extract(_URLS[_DATASETNAME])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(data_dir, "w_review_train.csv"), "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(data_dir, "w_review_test.csv"), "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) pairs from one semicolon-delimited CSV file.

        Each row is ``review_text;star_rating``. The rating field is stripped
        of surrounding whitespace before use: the original ``source`` branch
        passed the raw field to the ``ClassLabel`` feature, which can fail to
        encode values like ``"3\\n"``, while the ``seacrowd_text`` branch
        already stripped — stripping in both branches fixes that and keeps
        the two schemas consistent. The duplicated per-schema read loops are
        merged into a single pass.
        """
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter=";", quotechar='"')
            for idx, row in enumerate(reader):
                rating = row[1].strip()
                if self.config.schema == "source":
                    yield idx, {"review_body": row[0], "star_rating": rating}
                elif self.config.schema == "seacrowd_text":
                    # Map the 1-based star rating onto its label name in _CLASSES.
                    yield idx, {"id": str(idx), "text": row[0], "label": _CLASSES[int(rating) - 1]}