feat: upload script
face_masks.py → facial_keypoint_detection.py (RENAMED)
@@ -93,7 +93,8 @@ class FacialKeypointDetection(datasets.GeneratorBasedBuilder):
             features=datasets.Features({
                 'image_id': datasets.Value('uint32'),
                 'image': datasets.Image(),
-                'mask': datasets.Image()
+                'mask': datasets.Image(),
+                'key_points': datasets.Value('string')
             }),
             supervised_keys=None,
             homepage=_HOMEPAGE,
@@ -105,44 +106,38 @@ class FacialKeypointDetection(datasets.GeneratorBasedBuilder):
         masks = dl_manager.download_and_extract(f"{_DATA}masks.zip")
         annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
         images = dl_manager.iter_files(images)
+        masks = dl_manager.iter_files(masks)
+
         return [
             datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                     gen_kwargs={
                                         "images": images,
+                                        "masks": masks,
                                         'annotations': annotations
                                     }),
         ]

-    def _generate_examples(self, images, annotations):
+    def _generate_examples(self, images, masks, annotations):
         annotations_df = pd.read_csv(annotations, sep=',')
-        images_data = pd.DataFrame(
-
+        images_data = pd.DataFrame(
+            columns=['image_name', 'image_path', 'mask_path'])
+        for idx, (image_path, mask_path) in enumerate(zip(images, masks)):
             images_data.loc[idx] = {
-                '
-                '
+                'image_name': image_path.split('/')[-1],
+                'image_path': image_path,
+                'mask_path': mask_path
             }

         annotations_df = pd.merge(annotations_df,
                                   images_data,
                                   how='left',
-                                  on=['
-
-
-
-
-
-
-
+                                  on=['image_name'])
+
+        for row in annotations_df.sort_values(['image_name'
+                                               ]).itertuples(index=False):
+            yield idx, {
+                'image_id': row[0],
+                'image': load_image_file(row[3]),
+                'mask': load_image_file(row[4]),
+                'key_points': row[2]
             }
-
-            age = annotation.loc[annotation['Type'] == 1]['Age'].values[0]
-            country = annotation.loc[annotation['Type'] ==
-                                     1]['Country'].values[0]
-            sex = annotation.loc[annotation['Type'] == 1]['Sex'].values[0]
-
-            data['worker_id'] = worker_id
-            data['age'] = age
-            data['country'] = country
-            data['sex'] = sex
-
-            yield idx, data
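Note that load_image_file is defined elsewhere in the script and is not part of this hunk; presumably it reads the file at the given path into something datasets.Image() can encode. A minimal sketch under that assumption (the return shape is a guess, not taken from the commit):

def load_image_file(path):
    # Hypothetical stand-in for the helper defined elsewhere in the script:
    # datasets.Image() accepts a dict of raw bytes plus an optional path
    # and decodes it lazily when the example is accessed.
    with open(path, "rb") as f:
        return {"path": path, "bytes": f.read()}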
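One thing to watch in the new generator: the key yielded for every example is idx, the loop variable left over from the earlier indexing loop, so all examples share the same key. GeneratorBasedBuilder expects yielded keys to be unique (duplicates can raise DuplicatedKeysError). A variant that enumerates the merged rows could look like the sketch below; it is illustrative only, not what the commit contains:

    def _generate_examples(self, images, masks, annotations):
        ...  # build and merge annotations_df as above
        # enumerate() gives each yielded example its own unique key
        for example_idx, row in enumerate(
                annotations_df.sort_values(['image_name']).itertuples(index=False)):
            yield example_idx, {
                'image_id': row[0],
                'image': load_image_file(row[3]),
                'mask': load_image_file(row[4]),
                'key_points': row[2],
            }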
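For a quick smoke test of the renamed script, something like the following should work, assuming the file is saved locally as facial_keypoint_detection.py and the _DATA / _NAME URLs inside it are reachable (newer datasets releases may additionally require trust_remote_code=True for script-based datasets):

from datasets import load_dataset

# Build the TRAIN split from the local loading script
ds = load_dataset("./facial_keypoint_detection.py", split="train")

sample = ds[0]
print(sample["image_id"], sample["key_points"])
# 'image' and 'mask' are decoded to PIL images by datasets.Image()
print(sample["image"].size, sample["mask"].size)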