import random
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from pathlib import Path
import cv2
import logging
from typing import List, Dict, Tuple
import json
import numpy as np
from PIL import Image
import io
from tqdm import tqdm
from sklearn.model_selection import train_test_split
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
import re

# Arabic, Arabic Supplement and Arabic Extended-A Unicode blocks.
arabic_pattern = re.compile(r"[\u0600-\u06FF\u0750-\u077F\u08A0-\u08FF]")
english_pattern = re.compile(r"[A-Za-z]")


def contains_english_and_arabic(line: str) -> bool:
    """Return True when *line* contains both Arabic and Latin-script characters."""
    has_arabic = bool(arabic_pattern.search(line))
    has_english = bool(english_pattern.search(line))
    return has_arabic and has_english


class ArabicOCRDatasetConverter:
    """Convert a folder of OCR images plus a ground-truth text file into a
    parquet dataset (one row per image: raw bytes, dimensions, label text)."""

    def __init__(self, dataset_dir: str):
        self.dataset_dir = Path(dataset_dir)
        self.setup_logging()

    def setup_logging(self):
        """Configure root logging (INFO, timestamped) and grab a module logger."""
        logging.basicConfig(
            level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
        )
        self.logger = logging.getLogger(__name__)

    def get_images(self) -> List[Path]:
        """Collect *.jpg/*.jpeg/*.png files (both lower- and upper-case
        extensions), deduplicated by file name, returned in random order."""
        image_files: Dict[str, Path] = {}
        for ext in ("*.jpg", "*.jpeg", "*.png"):
            # Keying by file name deduplicates when a case-insensitive
            # filesystem matches both glob patterns for the same file.
            image_files.update({path.name: path for path in self.dataset_dir.glob(ext)})
            image_files.update(
                {path.name: path for path in self.dataset_dir.glob(ext.upper())}
            )
        result = list(image_files.values())
        random.shuffle(result)
        return result

    def parse_annotation_file(self, annotation_path: Path) -> Dict:
        """Parse a ``<image>,<text>`` (or tab-separated) ground-truth file.

        Returns a mapping of image file name -> label text. Lines mixing
        Arabic and Latin script are skipped. On any I/O or parse error the
        partial result collected so far is returned (logged, not raised).
        """
        annotations: Dict[str, str] = {}
        try:
            with open(annotation_path, "r", encoding="utf-8") as f:
                for line in tqdm(f, desc="Parsing annotations"):
                    line = line.strip().rstrip(",")
                    # Try comma-separated first, fall back to tab-separated.
                    # NOTE(review): only parts[1] is kept, so a label that
                    # itself contains commas is truncated at the first comma —
                    # confirm the gt files never embed commas in the text.
                    parts = line.split(",")
                    if len(parts) < 2:
                        parts = line.split("\t")
                        if len(parts) < 2:
                            self.logger.warning(f"Skipping invalid line: {line}")
                            continue
                    # Keep only the base file name when a path was given.
                    if "/" in parts[0]:
                        parts[0] = parts[0].split("/")[-1]
                    if contains_english_and_arabic(parts[1]):
                        self.logger.warning(f"Skipping mixed language line: {parts[1]}")
                        continue
                    annotations[parts[0].strip()] = parts[1].strip()
        except Exception as e:
            self.logger.error(f"Error parsing {annotation_path}: \n{e}")
        return annotations

    def process_image(self, img_path: Path, annotations: Dict) -> Dict:
        """Read one image and pair it with its ground-truth text.

        Returns None (and logs a warning) when the image has no annotation
        or cannot be decoded, so callers can filter it out.
        """
        if img_path.name not in annotations:
            self.logger.warning(f"No annotation found for image: {img_path}")
            return None
        try:
            # Context manager closes the underlying file handle; the original
            # dead `img is None` check is replaced by catching decode errors
            # (PIL raises rather than returning None).
            with Image.open(str(img_path)) as img:
                img_width, img_height = img.size
        except Exception as e:
            self.logger.warning(f"Could not read image: {img_path} ({e})")
            return None
        img_bytes = img_path.read_bytes()
        return {
            "image_name": img_path.stem,
            "image_bytes": img_bytes,
            "image_width": img_width,
            "image_height": img_height,
            "full_text": annotations[img_path.name],
        }

    def create_dataset(self, txt_path, include_images: bool = True) -> pd.DataFrame:
        """Build a DataFrame of all annotated images in ``dataset_dir``.

        ``include_images`` is kept for interface compatibility; image bytes
        are currently always included.
        """
        images = self.get_images()
        annotations = self.parse_annotation_file(txt_path)
        data = []
        if len(images) <= 10000:
            # Small datasets: sequential processing — the process-pool
            # startup and pickling cost outweighs the gain. (Original code
            # had this condition inverted and also appended unfiltered None
            # results, producing broken DataFrame rows.)
            for image in tqdm(images, desc="Processing images"):
                entry = self.process_image(image, annotations)
                if entry:
                    data.append(entry)
        else:
            with ProcessPoolExecutor(
                max_workers=multiprocessing.cpu_count()
            ) as executor:
                results = list(
                    tqdm(
                        executor.map(
                            self.process_image, images, [annotations] * len(images)
                        ),
                        total=len(images),
                        desc="Processing images",
                    )
                )
            data = [result for result in results if result]
        return pd.DataFrame(data)

    def save_parquet(
        self, output_path: str, annotation_path: str, include_images: bool = True
    ) -> pd.DataFrame:
        """Create the dataset and write it as a snappy-compressed parquet file.

        Returns the DataFrame in all cases (empty on failure) so callers can
        concatenate results.
        """
        df = self.create_dataset(annotation_path, include_images)
        if df.empty:
            self.logger.error("No data to save!")
            return df
        try:
            table = pa.Table.from_pandas(df)
            pq.write_table(table, output_path, compression="snappy")
            self.logger.info(f"Created parquet file at {output_path}")
            self.logger.info(f"Dataset shape: {df.shape}")
            self.logger.info("\nSample data:")
            print("\nSample entry:")
            sample = df.iloc[0]
            print(f"Image name: {sample['image_name']}")
            print(f"Image size: {len(sample['image_bytes'])} bytes")
            print(f"Dimensions: {sample['image_width']}x{sample['image_height']}")
            print(f"Full text: {sample['full_text']}")
            return df
        except Exception as e:
            self.logger.error(f"Error saving parquet file: {e}")
            return df

    @staticmethod
    def read_parquet(parquet_path: str, index: int = 0) -> Dict:
        """Read entry ``index`` back from a parquet file, decoding the image
        bytes into a BGR numpy array via OpenCV."""
        df = pd.read_parquet(parquet_path)
        entry = df.iloc[index]
        img_bytes = entry["image_bytes"]
        nparr = np.frombuffer(img_bytes, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        return {
            "image_name": entry["image_name"],
            "image": img,
            "image_width": entry["image_width"],
            "image_height": entry["image_height"],
            "full_text": entry["full_text"],
        }


def process_and_combine_datasets():
    """Convert every sub-dataset to parquet, then write combined
    train_real / train / test parquet files.

    Returns (final_train_df, final_test_df) — the fully combined train and
    test frames (the original returned the Arabic-only ``test_df`` despite
    building and saving the combined test set).
    """
    ## Process and save English Train, Test datasets
    converter_en_train = ArabicOCRDatasetConverter(dataset_dir="English_words_train")
    train_en_df = converter_en_train.save_parquet(
        output_path="train_en.parquet",
        annotation_path="English_words_train/gt.txt",
        include_images=True,
    )
    converter_en_test = ArabicOCRDatasetConverter(dataset_dir="English_words_test")
    test_en_df = converter_en_test.save_parquet(
        output_path="test_en.parquet",
        annotation_path="English_words_test/gt.txt",
        include_images=True,
    )
    ## Process and save Arabic Real Train, Test datasets
    converter_train = ArabicOCRDatasetConverter(dataset_dir="Arabic_words_train")
    train_df = converter_train.save_parquet(
        output_path="train_ar_real.parquet",
        annotation_path="Arabic_words_train/gt.txt",
        include_images=True,
    )
    converter_test = ArabicOCRDatasetConverter(dataset_dir="Arabic_words_test")
    test_df = converter_test.save_parquet(
        output_path="test_ar.parquet",
        annotation_path="Arabic_words_test/gt.txt",
        include_images=True,
    )
    ## Process and save Synthetic Arabic Train, Test datasets
    converter_gensynth = ArabicOCRDatasetConverter(
        dataset_dir="GenSynthAr/Arabic/Images"
    )
    gensynth_df = converter_gensynth.save_parquet(
        output_path="gensynth_full.parquet",
        annotation_path="GenSynthAr/Arabic/GenSynthAr.txt",
        include_images=True,
    )
    ## Process and save numbers Train, Test datasets
    converter_numbers_train = ArabicOCRDatasetConverter(dataset_dir="numbers_train/")
    numbers_train_df = converter_numbers_train.save_parquet(
        output_path="train_numbers.parquet",
        annotation_path="numbers_train/labels.txt",
        include_images=True,
    )
    converter_numbers_test = ArabicOCRDatasetConverter(dataset_dir="numbers_test/")
    numbers_test_df = converter_numbers_test.save_parquet(
        output_path="test_numbers.parquet",
        annotation_path="numbers_test/gt.txt",
        include_images=True,
    )
    ## Combine Real training datasets english + arabic
    final_train_df_real = pd.concat([train_df, train_en_df], ignore_index=True)
    table_train = pa.Table.from_pandas(final_train_df_real)
    pq.write_table(table_train, "train_real.parquet", compression="snappy")
    ## Generate full train dataset
    final_train_df = pd.concat(
        [train_df, train_en_df, gensynth_df, numbers_train_df], ignore_index=True
    )
    print("Length of Real train data {}".format(len(train_df) + len(train_en_df)))
    table_train = pa.Table.from_pandas(final_train_df)
    pq.write_table(table_train, "train.parquet", compression="snappy")
    ## Generate final test dataset
    final_test_df = pd.concat([test_df, test_en_df, numbers_test_df], ignore_index=True)
    table_test = pa.Table.from_pandas(final_test_df)
    pq.write_table(table_test, "test.parquet", compression="snappy")
    print(f"Train Real shape: {final_train_df_real.shape}")
    print(f"Train set shape: {final_train_df.shape}")
    print(f"test set shape: {final_test_df.shape}")
    return final_train_df, final_test_df


def generate_synthetic_bigrams():
    """Convert the Arabic and English synthetic bigram datasets and write a
    combined train_bigram.parquet."""
    ## Process and save Synthetic Arabic Train, Test datasets
    converter_gensynth = ArabicOCRDatasetConverter(dataset_dir="ArGenData/imgs")
    gensynth_ar_df = converter_gensynth.save_parquet(
        output_path="gensynth_ar.parquet",
        annotation_path="ArGenData/GenSynthAr.txt",
        include_images=True,
    )
    converter_en_gensynth = ArabicOCRDatasetConverter(dataset_dir="EnGenData/imgs")
    gensynth_en_df = converter_en_gensynth.save_parquet(
        output_path="gensynth_en.parquet",
        annotation_path="EnGenData/GenSynthAr.txt",
        include_images=True,
    )
    final_train_df = pd.concat([gensynth_en_df, gensynth_ar_df], ignore_index=True)
    final_train = pa.Table.from_pandas(final_train_df)
    pq.write_table(final_train, "train_bigram.parquet", compression="snappy")
    # pyarrow.Table exposes .shape as (num_rows, num_columns).
    print(f"Train Bigram Synthetic shape: {final_train.shape}")


def process_ktsrv_dataset(
    dataset_root: str, output_dir: str = "ktsrv_parquets", prefix="ktsrv"
):
    """Convert the four KTSRV recognition splits (real/synthetic x
    arabic/english) into parquet files under ``output_dir``."""
    root = Path(dataset_root)
    out_dir = Path(output_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    splits = [
        ("real", "arabic"),
        ("real", "english"),
        ("synthetic", "arabic"),
        ("synthetic", "english"),
    ]
    for split_type, lang in splits:
        folder = root / split_type / lang
        gt_file = folder / "gt.txt"
        if not gt_file.exists():
            print(f"Skipping {split_type}-{lang}, no gt.txt found")
            continue
        # NOTE(review): the image folder repeats the language segment
        # (<root>/<split>/<lang>/<lang>) while gt.txt sits one level up —
        # presumably the dataset layout nests images that way; verify.
        parquet_path, df = convert_folder_to_parquet(
            prefix, out_dir, split_type + "_" + lang, str(folder) + "/" + lang, gt_file
        )
        print(f"Saved {parquet_path} with {len(df)} samples")


def convert_folder_to_parquet(prefix, out_dir, suffix, folder, gt_file):
    """Convert one image folder + gt file into ``<prefix>_<suffix>.parquet``
    inside ``out_dir``; returns (parquet_path, dataframe)."""
    converter = ArabicOCRDatasetConverter(dataset_dir=folder)
    parquet_name = f"{prefix}_{suffix}.parquet"
    # Path joining via "/" accepts out_dir as either str or Path; the
    # original `out_dir + "/" + parquet_name` raised TypeError when a Path
    # was passed (as process_ktsrv_dataset does).
    parquet_path = Path(out_dir) / parquet_name
    df = converter.save_parquet(
        output_path=str(parquet_path),
        annotation_path=str(gt_file),
        include_images=True,
    )
    return parquet_path, df


if __name__ == "__main__":
    # final_train_df, final_test_df = process_and_combine_datasets()
    # generate_synthetic_bigrams()
    process_ktsrv_dataset(r"KSTRV1\recognition", ".")