Upload id_hatespeech.py with huggingface_hub
Files changed: id_hatespeech.py (+12, -12)

id_hatespeech.py (CHANGED)
@@ -4,9 +4,9 @@ from typing import Dict, List, Tuple
 import datasets
 import pandas as pd

-from
-from
-from
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Tasks

 _CITATION = """\
 @inproceedings{inproceedings,
@@ -36,7 +36,7 @@ _URLS = {
 }
 _SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]
 _SOURCE_VERSION = "1.0.0"
-
+_SEACROWD_VERSION = "2024.06.20"


 class IdHatespeech(datasets.GeneratorBasedBuilder):
@@ -44,21 +44,21 @@ class IdHatespeech(datasets.GeneratorBasedBuilder):
     designed for hate speech detection NLP task."""

     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

     BUILDER_CONFIGS = [
-
+        SEACrowdConfig(
             name="id_hatespeech_source",
             version=SOURCE_VERSION,
             description="ID Hatespeech source schema",
             schema="source",
             subset_id="id_hatespeech",
         ),
-
-            name="
-            version=
+        SEACrowdConfig(
+            name="id_hatespeech_seacrowd_text",
+            version=SEACROWD_VERSION,
             description="ID Hatespeech Nusantara schema",
-            schema="
+            schema="seacrowd_text",
             subset_id="id_hatespeech",
         ),
     ]
@@ -68,7 +68,7 @@ class IdHatespeech(datasets.GeneratorBasedBuilder):
     def _info(self) -> datasets.DatasetInfo:
         if self.config.schema == "source":
             features = datasets.Features({"tweet": datasets.Value("string"), "label": datasets.Value("string")})
-        elif self.config.schema == "
+        elif self.config.schema == "seacrowd_text":
             features = schemas.text_features(["Non_HS", "HS"])


@@ -111,7 +111,7 @@ class IdHatespeech(datasets.GeneratorBasedBuilder):
             }
             yield row.id, ex

-        elif self.config.schema == "
+        elif self.config.schema == "seacrowd_text":
             for row in df.itertuples():
                 ex = {
                     "id": str(row.id),
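For reference, the two configs introduced in BUILDER_CONFIGS above can be exercised with the standard datasets loading API. The snippet below is a minimal usage sketch and is not part of this commit: the config names, the schema names, and the ["Non_HS", "HS"] label set come from the diff, while the local script path and the trust_remote_code flag are assumptions that depend on which datasets release runs the loader.

import datasets

# Illustrative only: assumes a local copy of id_hatespeech.py and a `datasets`
# release that still executes dataset loading scripts (recent releases require
# trust_remote_code=True for that).

# Source schema: raw "tweet" and "label" string columns, per _info() above.
source = datasets.load_dataset(
    "id_hatespeech.py",
    name="id_hatespeech_source",
    trust_remote_code=True,
)

# SEACrowd text schema: features come from schemas.text_features(["Non_HS", "HS"]),
# so labels are limited to those two classes.
seacrowd = datasets.load_dataset(
    "id_hatespeech.py",
    name="id_hatespeech_seacrowd_text",
    trust_remote_code=True,
)

print(source)
print(seacrowd)

In the seacrowd_text branch, each example carries at least an "id" field (visible in _generate_examples); the remaining fields are whatever schemas.text_features defines, which is expected to be a text column plus a class label over ["Non_HS", "HS"].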