Melaraby committed on
Commit
7268c21
·
verified ·
1 Parent(s): d1dfb85

Upload 3 files

Browse files
Files changed (3) hide show
  1. save_parquet.py +167 -0
  2. test.parquet +3 -0
  3. train.parquet +3 -0
save_parquet.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import pyarrow as pa
3
+ import pyarrow.parquet as pq
4
+ from pathlib import Path
5
+ import cv2
6
+ import logging
7
+ from typing import List, Dict, Tuple
8
+ import json
9
+
10
+
11
class ArabicOCRDatasetConverter:
    """Convert a directory of OCR images + annotations into a parquet dataset.

    ``dataset_dir`` is expected to contain ``*.jpg`` images, each paired with
    a same-stem ``*.txt`` annotation file whose comma-separated lines look
    like::

        x1,y1,x2,y2,x3,y3,x4,y4,language,text

    i.e. eight polygon vertex coordinates, a language tag, then the
    transcription (which may itself contain commas).
    """

    def __init__(self, dataset_dir: str):
        self.dataset_dir = Path(dataset_dir)
        self.setup_logging()

    def setup_logging(self) -> None:
        """Attach a module-level logger with timestamped INFO output."""
        logging.basicConfig(
            level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
        )
        self.logger = logging.getLogger(__name__)

    def get_image_annotation_pairs(self) -> List[Tuple[Path, Path]]:
        """Return (image_path, annotation_path) pairs in the dataset dir.

        Images without a sibling ``.txt`` file are skipped with a warning.
        """
        pairs = []
        for img_path in self.dataset_dir.glob("*.jpg"):
            txt_path = img_path.with_suffix(".txt")
            if txt_path.exists():
                pairs.append((img_path, txt_path))
            else:
                self.logger.warning(f"No annotation file found for {img_path}")
        return pairs

    def order_text_instances(self, annotations: List[Dict]) -> str:
        """Join instance texts into a single reading-ordered string.

        Instances are ordered by polygon centroid: top-to-bottom, and for
        equal vertical position right-to-left for Arabic, left-to-right for
        everything else. All Arabic instances precede all non-Arabic ones.

        NOTE(review): ordering compares exact centroid y values, so
        instances on the same visual line with slightly different centroids
        may interleave — confirm this matches the intended reading order.
        """

        def parse_coordinates(flattened):
            # [x1, y1, x2, y2, ...] -> [(x1, y1), (x2, y2), ...]
            return [
                (flattened[i], flattened[i + 1]) for i in range(0, len(flattened), 2)
            ]

        def get_center(flattened_coordinates):
            # Centroid (mean) of the polygon's vertices.
            polygon = parse_coordinates(flattened_coordinates)
            x_coords = [point[0] for point in polygon]
            y_coords = [point[1] for point in polygon]
            return (sum(x_coords) / len(polygon), sum(y_coords) / len(polygon))

        def arabic_sort_key(annotation):
            # Top-to-bottom, then right-to-left (negated x).
            center = get_center(annotation["coordinates"])
            return (center[1], -center[0])

        def non_arabic_sort_key(annotation):
            # Top-to-bottom, then left-to-right.
            center = get_center(annotation["coordinates"])
            return (center[1], center[0])

        arabic_annotations = [a for a in annotations if a["language"] == "Arabic"]
        arabic_annotations = sorted(arabic_annotations, key=arabic_sort_key)
        english_annotations = [a for a in annotations if a["language"] != "Arabic"]
        english_annotations = sorted(english_annotations, key=non_arabic_sort_key)

        # Join all text with spaces.
        full_text = " ".join(
            ann["text"] for ann in arabic_annotations + english_annotations
        )
        return full_text

    def parse_annotation_file(self, annotation_path: Path) -> List[Dict]:
        """Parse one annotation file into a list of instance dicts.

        Each valid line yields ``{"coordinates": [8 floats], "language": str,
        "text": str}``. Lines with fewer than 10 comma-separated fields are
        logged and skipped; an unreadable file yields a (possibly partial)
        list with the error logged.
        """
        annotations = []
        try:
            with open(annotation_path, "r", encoding="utf-8") as f:
                for line in f:
                    line = line.strip().rstrip(",")
                    parts = line.split(",")
                    if len(parts) >= 10:
                        annotation = {
                            "coordinates": [float(x) for x in parts[:8]],
                            "language": parts[8],
                            # Re-join the tail so transcriptions that
                            # themselves contain commas are kept intact
                            # (previously only parts[9] was stored,
                            # silently truncating such texts).
                            "text": ",".join(parts[9:]),
                        }
                        annotations.append(annotation)
                    else:
                        self.logger.warning(
                            f"Line has insufficient elements in {annotation_path}: {line}"
                        )
        except Exception as e:
            self.logger.error(f"Error parsing {annotation_path}: {e}")
        return annotations

    def create_dataset(self, include_images: bool = False) -> pd.DataFrame:
        """Build the dataset as a DataFrame, one row per image.

        Columns: ``image_name`` (file stem), ``instances`` (list of instance
        dicts), ``full_text`` (reading-ordered concatenation), and optionally
        ``image_data`` (raw pixel bytes) when ``include_images`` is True.
        Unreadable images and per-image failures are logged and skipped.
        """
        data = []
        pairs = self.get_image_annotation_pairs()

        for img_path, txt_path in pairs:
            try:
                # Decode the image mainly to validate that it is readable.
                img = cv2.imread(str(img_path))
                if img is None:
                    self.logger.warning(f"Could not read image: {img_path}")
                    continue

                annotations = self.parse_annotation_file(txt_path)

                # Single entry per image, carrying all of its instances.
                entry = {
                    "image_name": img_path.stem,
                    "instances": [
                        {
                            "coordinates": ann["coordinates"],
                            "language": ann["language"],
                            "text": ann["text"],
                        }
                        for ann in annotations
                    ],
                    "full_text": self.order_text_instances(annotations),
                }

                if include_images:
                    # NOTE(review): tobytes() stores the raw pixel buffer
                    # without its (height, width, channels) shape, so the
                    # image cannot be reconstructed from this column alone —
                    # consider img_path.read_bytes() (encoded file) instead.
                    entry["image_data"] = img.tobytes()

                data.append(entry)

            except Exception as e:
                self.logger.error(f"Error processing {img_path}: {e}")

        return pd.DataFrame(data)

    def save_parquet(self, output_path: str, include_images: bool = False) -> None:
        """Create the dataset and write it to ``output_path`` as parquet.

        The ``instances`` column is serialized to a JSON string for storage;
        read it back with ``json.loads``. Logs and returns without writing
        when the dataset is empty or the write fails.
        """
        df = self.create_dataset(include_images)

        if df.empty:
            self.logger.error("No data to save!")
            return

        try:
            # Serialize the nested instance dicts to JSON strings.
            # ensure_ascii=False keeps Arabic text human-readable instead of
            # storing \uXXXX escapes (json.loads reads both forms the same).
            df["instances"] = df["instances"].apply(
                lambda v: json.dumps(v, ensure_ascii=False)
            )

            table = pa.Table.from_pandas(df)
            pq.write_table(table, output_path, compression="snappy")

            self.logger.info(f"Created parquet file at {output_path}")
            self.logger.info(f"Dataset shape: {df.shape}")
            self.logger.info("\nSample data:")
            print("\nSample entry:")
            sample = df.iloc[0]
            print(f"Image: {sample['image_name']}")
            print(f"Full text: {sample['full_text']}")
            print(f"Instances: {json.loads(sample['instances'])}")

        except Exception as e:
            self.logger.error(f"Error saving parquet file: {e}")
157
+
158
+
159
# Example usage
if __name__ == "__main__":
    # Convert each split directory into its corresponding parquet file.
    for source_dir, parquet_name in (
        ("Det_train", "train.parquet"),
        ("Det_test", "test.parquet"),
    ):
        converter = ArabicOCRDatasetConverter(dataset_dir=source_dir)
        converter.save_parquet(output_path=parquet_name, include_images=False)
test.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef3e81a1e57b940c1a04a50903c1a58f6360c82b0c529e3f0d9c6b82e030c486
3
+ size 91446
train.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b9d364aa97a8b098f4ed34414bbeaaaab6bc651e961c72e3188f4c0d412adfc5
3
+ size 273203