Mostafa ElAraby committed
Commit bd9b272 · 1 Parent(s): 3e7e116

updated script

Files changed (1)
  1. save_parquet.py +34 -8
save_parquet.py CHANGED
@@ -14,6 +14,16 @@ from tqdm import tqdm
 from sklearn.model_selection import train_test_split
 import multiprocessing
 from concurrent.futures import ProcessPoolExecutor
+import re
+
+arabic_pattern = re.compile(r"[\u0600-\u06FF\u0750-\u077F\u08A0-\u08FF]")
+english_pattern = re.compile(r"[A-Za-z]")
+
+
+def contains_english_and_arabic(line: str) -> bool:
+    has_arabic = bool(arabic_pattern.search(line))
+    has_english = bool(english_pattern.search(line))
+    return has_arabic and has_english
 
 
 class ArabicOCRDatasetConverter:
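
The new contains_english_and_arabic helper only fires when a line mixes both scripts. A minimal standalone sketch of its behavior, using the same patterns as the commit (the sample strings are invented for illustration):

    import re

    arabic_pattern = re.compile(r"[\u0600-\u06FF\u0750-\u077F\u08A0-\u08FF]")
    english_pattern = re.compile(r"[A-Za-z]")

    def contains_english_and_arabic(line: str) -> bool:
        # True only when Arabic and Latin characters co-occur in the line.
        has_arabic = bool(arabic_pattern.search(line))
        has_english = bool(english_pattern.search(line))
        return has_arabic and has_english

    print(contains_english_and_arabic("hello مرحبا"))  # True  -> such lines get skipped
    print(contains_english_and_arabic("مرحبا"))        # False -> pure Arabic is kept
    print(contains_english_and_arabic("hello"))        # False -> pure English is kept
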
@@ -47,8 +57,14 @@ class ArabicOCRDatasetConverter:
                 parts = line.split(",")
                 if len(parts) < 2:
                     parts = line.split("\t")
+                if len(parts) < 2:
+                    self.logger.warning(f"Skipping invalid line: {line}")
+                    continue
                 if "/" in parts[0]:
                     parts[0] = parts[0].split("/")[-1]
+                if contains_english_and_arabic(parts[1]):
+                    self.logger.warning(f"Skipping mixed language line: {parts[1]}")
+                    continue
                 annotations[parts[0].strip()] = parts[1].strip()
         except Exception as e:
             self.logger.error(f"Error parsing {annotation_path}: {e}")
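
With these guards, the annotation parser tolerates either comma or tab separators, strips directory prefixes from file names, and drops unusable rows. A hypothetical gt.txt illustrating each case (file names and labels are invented; the second row is tab-separated):

    img_001.png,مرحبا
    lines/img_002.png	اهلا
    img_003.png,hello مرحبا
    not_a_valid_row

The first row is kept as-is, the second is kept with "lines/" reduced to the basename, the third is skipped as a mixed Arabic/English label, and the last is skipped because neither separator yields two parts.
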
@@ -247,7 +263,9 @@ def generate_synthetic_bigrams():
     print(f"Train Bigram Synthetic shape: {final_train.shape}")
 
 
-def process_ktsrv_dataset(dataset_root: str, output_dir: str = "ktsrv_parquets"):
+def process_ktsrv_dataset(
+    dataset_root: str, output_dir: str = "ktsrv_parquets", prefix="ktsrv"
+):
     root = Path(dataset_root)
     out_dir = Path(output_dir)
     out_dir.mkdir(parents=True, exist_ok=True)
@@ -266,17 +284,25 @@ def process_ktsrv_dataset(dataset_root: str, output_dir: str = "ktsrv_parquets"):
             print(f"Skipping {split_type}-{lang}, no gt.txt found")
             continue
 
-        converter = ArabicOCRDatasetConverter(dataset_dir=str(folder) + "/" + lang)
-        parquet_name = f"ktsrv_{split_type}_{lang}.parquet"
-        parquet_path = out_dir / parquet_name
-        df = converter.save_parquet(
-            output_path=str(parquet_path),
-            annotation_path=str(gt_file),
-            include_images=True,
+        parquet_path, df = convert_folder_to_parquet(
+            prefix, out_dir, split_type + "_" + lang, str(folder) + "/" + lang, gt_file
         )
         print(f"Saved {parquet_path} with {len(df)} samples")
 
 
+def convert_folder_to_parquet(prefix, out_dir, suffix, folder, gt_file):
+    converter = ArabicOCRDatasetConverter(dataset_dir=folder)
+    parquet_name = f"{prefix}_{suffix}.parquet"
+    parquet_path = out_dir / parquet_name
+    df = converter.save_parquet(
+        output_path=str(parquet_path),
+        annotation_path=str(gt_file),
+        include_images=True,
+    )
+
+    return parquet_path, df
+
+
 if __name__ == "__main__":
     # final_train_df, final_test_df = process_and_combine_datasets()
     # generate_synthetic_bigrams()
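
For completeness, a usage sketch of the refactored helpers; the dataset paths below are hypothetical, and Path comes from pathlib as elsewhere in the script:

    from pathlib import Path

    # Walk a hypothetical dataset root and write one parquet per split/language,
    # named <prefix>_<split>_<lang>.parquet under output_dir.
    process_ktsrv_dataset("/data/ktsrv", output_dir="ktsrv_parquets", prefix="ktsrv")

    # Or convert a single folder directly with the extracted helper.
    parquet_path, df = convert_folder_to_parquet(
        prefix="ktsrv",
        out_dir=Path("ktsrv_parquets"),
        suffix="train_ar",
        folder="/data/ktsrv/train/ar",
        gt_file="/data/ktsrv/train/ar/gt.txt",
    )
    print(f"Wrote {parquet_path} with {len(df)} samples")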
 