# Mostafa ElAraby
# updated img files using pil
# b8da998
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from pathlib import Path
import cv2
import logging
from typing import List, Dict, Tuple
import json
import numpy as np
from PIL import Image
import io
class ArabicOCRDatasetConverter:
    """Convert paired image/annotation files into a parquet dataset for Arabic OCR.

    Expects a directory of ``*.jpg`` images, each with a sibling ``*.txt``
    annotation file whose lines have the form
    ``x1,y1,x2,y2,x3,y3,x4,y4,language,text`` — the text field may itself
    contain commas, so everything after the ninth comma is treated as text.
    """

    def __init__(self, dataset_dir: str):
        self.dataset_dir = Path(dataset_dir)
        self.setup_logging()

    def setup_logging(self) -> None:
        """Configure root logging once and attach a module-level logger."""
        logging.basicConfig(
            level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
        )
        self.logger = logging.getLogger(__name__)

    def get_image_annotation_pairs(self) -> List[Tuple[Path, Path]]:
        """Return (image, annotation) path pairs; warn for images with no .txt."""
        pairs = []
        for img_path in self.dataset_dir.glob("*.jpg"):
            txt_path = img_path.with_suffix(".txt")
            if txt_path.exists():
                pairs.append((img_path, txt_path))
            else:
                self.logger.warning(f"No annotation file found for {img_path}")
        return pairs

    def order_text_instances(self, annotations: List[Dict]) -> str:
        """
        Order text instances top to bottom — right to left for Arabic,
        left to right for everything else — and join them with spaces.
        Arabic instances come before non-Arabic ones in the result.
        """

        def parse_coordinates(flattened):
            # [x1, y1, x2, y2, ...] -> [(x1, y1), (x2, y2), ...]
            return [
                (flattened[i], flattened[i + 1]) for i in range(0, len(flattened), 2)
            ]

        def get_center(flattened_coordinates):
            polygon = parse_coordinates(flattened_coordinates)
            x_coords = [point[0] for point in polygon]
            y_coords = [point[1] for point in polygon]
            return (sum(x_coords) / len(polygon), sum(y_coords) / len(polygon))

        def arabic_sort_key(annotation):
            # Negate x so instances further right sort first (RTL reading order).
            center = get_center(annotation["coordinates"])
            return (center[1], -center[0])

        def non_arabic_sort_key(annotation):
            center = get_center(annotation["coordinates"])
            return (center[1], center[0])

        arabic_annotations = sorted(
            (a for a in annotations if a["language"] == "Arabic"),
            key=arabic_sort_key,
        )
        english_annotations = sorted(
            (a for a in annotations if a["language"] != "Arabic"),
            key=non_arabic_sort_key,
        )
        # Join all text with spaces
        return " ".join(
            ann["text"] for ann in arabic_annotations + english_annotations
        )

    def parse_annotation_file(self, annotation_path: Path) -> List[Dict]:
        """Parse one annotation file into dicts of coordinates/language/text.

        Lines with fewer than 10 comma-separated fields are skipped with a
        warning. Fields past the ninth are re-joined with commas, since the
        annotation text may itself contain commas (taking only ``parts[9]``
        would silently truncate such text).
        """
        annotations = []
        try:
            with open(annotation_path, "r", encoding="utf-8") as f:
                for line in f:
                    line = line.strip().rstrip(",")
                    parts = line.split(",")
                    if len(parts) >= 10:
                        annotation = {
                            "coordinates": [float(x) for x in parts[:8]],
                            "language": parts[8],
                            # Re-join the tail: text may contain commas.
                            "text": ",".join(parts[9:]),
                        }
                        annotations.append(annotation)
                    else:
                        self.logger.warning(
                            f"Line has insufficient elements in {annotation_path}: {line}"
                        )
        except Exception as e:
            self.logger.error(f"Error parsing {annotation_path}: {e}")
        return annotations

    def create_dataset(self, include_images: bool = True) -> pd.DataFrame:
        """Build a DataFrame with one row per image.

        Args:
            include_images: when True, embed PNG-encoded image bytes in the
                ``image_bytes`` column; when False, that column holds None
                (dimensions are still recorded). Previously this flag was
                ignored and bytes were always embedded.

        Returns:
            DataFrame with columns image_name, image_bytes, image_width,
            image_height, instances (JSON string), full_text.
        """
        data = []
        for img_path, txt_path in self.get_image_annotation_pairs():
            try:
                # Context manager guarantees the file handle is released;
                # Image.open raises on unreadable files rather than returning None.
                with Image.open(str(img_path)) as img:
                    img_width, img_height = img.size
                    if include_images:
                        # Re-encode as PNG so the stored bytes are lossless.
                        img_byte_arr = io.BytesIO()
                        img.save(img_byte_arr, format="PNG")
                        img_bytes = img_byte_arr.getvalue()
                    else:
                        img_bytes = None
                annotations = self.parse_annotation_file(txt_path)
                entry = {
                    "image_name": img_path.stem,
                    "image_bytes": img_bytes,
                    "image_width": img_width,
                    "image_height": img_height,
                    # Serialize instances to JSON so the parquet schema stays flat.
                    "instances": json.dumps(
                        [
                            {
                                "coordinates": ann["coordinates"],
                                "language": ann["language"],
                                "text": ann["text"],
                            }
                            for ann in annotations
                        ]
                    ),
                    "full_text": self.order_text_instances(annotations),
                }
                data.append(entry)
            except Exception as e:
                self.logger.error(f"Error processing {img_path}: {e}")
        return pd.DataFrame(data)

    def save_parquet(self, output_path: str, include_images: bool = True) -> None:
        """Create the dataset and write it to a snappy-compressed parquet file.

        Logs an error and returns without writing when no data was produced.
        """
        df = self.create_dataset(include_images)
        if df.empty:
            self.logger.error("No data to save!")
            return
        try:
            table = pa.Table.from_pandas(df)
            pq.write_table(table, output_path, compression="snappy")
            self.logger.info(f"Created parquet file at {output_path}")
            self.logger.info(f"Dataset shape: {df.shape}")
            self.logger.info("\nSample data:")
            print("\nSample entry:")
            sample = df.iloc[0]
            print(f"Image name: {sample['image_name']}")
            # image_bytes is None when include_images=False was requested.
            size = len(sample["image_bytes"]) if sample["image_bytes"] else 0
            print(f"Image size: {size} bytes")
            print(f"Dimensions: {sample['image_width']}x{sample['image_height']}")
            print(f"Full text: {sample['full_text']}")
            print(f"Instances: {sample['instances']}")
        except Exception as e:
            self.logger.error(f"Error saving parquet file: {e}")

    @staticmethod
    def read_parquet(parquet_path: str, index: int = 0) -> Dict:
        """
        Read a single entry from the parquet file
        Args:
            parquet_path: Path to parquet file
            index: Index of entry to read
        Returns:
            Dictionary containing image and annotations
        """
        df = pd.read_parquet(parquet_path)
        entry = df.iloc[index]
        # Convert image bytes back to a BGR numpy array via OpenCV.
        img_bytes = entry["image_bytes"]
        nparr = np.frombuffer(img_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        # Parse instances JSON
        instances = json.loads(entry["instances"])
        return {
            "image_name": entry["image_name"],
            "image": img,
            "image_width": entry["image_width"],
            "image_height": entry["image_height"],
            "instances": instances,
            "full_text": entry["full_text"],
        }
if __name__ == "__main__":
    # Convert each split directory to its own parquet file, skipping image bytes.
    for split_dir, parquet_name in (
        ("Det_train", "train_detection.parquet"),
        ("Det_test", "test_detection.parquet"),
    ):
        converter = ArabicOCRDatasetConverter(dataset_dir=split_dir)
        converter.save_parquet(output_path=parquet_name, include_images=False)