trojblue committed
Commit 2efaa3d · verified · 1 Parent(s): 4949d0b

Upload postproc_data.ipynb

Files changed (1): postproc_data.ipynb (+500 −0)
postproc_data.ipynb ADDED
Context:

- All images from sakugabooru have been downloaded, with this schema (the scraper writes filenames with a `sankaku_` prefix, as the listings below show):
  - `./data/post_{id}/sankaku_{id}.{ext}`
  - `./data/post_{id}/sankaku_{id}.json`
- Now we need to tar them and upload the tars to Hugging Face.

To install the libraries:

```bash
pip install unibox hfutils
```
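Concretely, one post directory from the listings later in this notebook looks like this (the media file shown is illustrative; the actual extension varies per post):

```text
data/
└── post_100058/
    ├── sankaku_100058.json
    └── sankaku_100058.webm   # illustrative; could be .jpg / .mp4 / .gif, etc.
```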
```python
import os
import tarfile
from pathlib import Path
from typing import List

import unibox as ub
```
Get available files:
```python
# Source and destination directories
data_dir = "/rmt/yada/dev/sakuga-scraper/data"
output_dir = "/rmt/yada/dev/sakuga-scraper/tars"

# Collect all downloaded files (media + json)
all_files = ub.traverses(data_dir)
```
```python
# Convert extensions to sets for validation; .gif is treated as video here
image_extensions = set(ub.IMG_FILES) - {'.gif'}
video_extensions = {'.webm', '.mp4', '.gif'}

# Categorize files
image_files = [f for f in all_files if Path(f).suffix in image_extensions]
video_files = [f for f in all_files if Path(f).suffix in video_extensions]
json_files = [f for f in all_files if Path(f).suffix == '.json']

# Recompute lengths
total_unique_files = len(set(image_files + video_files + json_files))
print(f"all files: {len(all_files)}\nimage files: {len(image_files)}, video files: {len(video_files)}, json files: {len(json_files)}\ntotal unique files: {total_unique_files}")
```

Output:

```
all files: 404160
image files: 8680, video files: 155238, json files: 240242
total unique files: 404160
```
```python
json_files[:1], image_files[:1], video_files[:1]
```

Output:

```
(['/rmt/yada/dev/sakuga-scraper/data/post_100058/sankaku_100058.json'],
 ['/rmt/yada/dev/sakuga-scraper/data/post_105581/sankaku_105581.jpg'],
 ['/rmt/yada/dev/sakuga-scraper/data/post_102570/sankaku_102570.mp4'])
```
```python
def determine_tar_info(files: List[str], modulo: int = 10000):
    """
    Determine how many full tars can be created and the current last post ID.

    Args:
        files (List[str]): List of file paths (images/videos).
        modulo (int): Constant range of IDs per tar file (default: 10,000).

    Returns:
        tuple: (number_of_full_tars, last_post_id)
    """
    post_ids = []
    for file_path in files:
        filename = os.path.basename(file_path)
        try:
            post_id = int(filename.split('_')[1].split('.')[0])
            post_ids.append(post_id)
        except (IndexError, ValueError):
            print(f"Skipping file with invalid format: {filename}")
            continue

    if not post_ids:
        return 0, None

    last_post_id = max(post_ids)
    number_of_full_tars = (last_post_id + 1) // modulo

    return number_of_full_tars, last_post_id


# (full tar count, last post id)
determine_tar_info(image_files), determine_tar_info(video_files)
```

Output:

```
((27, 272758), (27, 273264))
```
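A quick sanity check of the bucketing math, using two hypothetical filenames (not taken from the dataset): the highest post ID seen is 273264, so buckets 0 through 26 (IDs 0 to 269999) are complete, and `(273264 + 1) // 10000 == 27`:

```python
# Hypothetical inputs, just to exercise determine_tar_info:
assert determine_tar_info(["sankaku_105581.jpg", "sankaku_273264.mp4"]) == (27, 273264)
```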
Do the tarring:
```python
def generate_tar_from_files(
    files: List[str],
    output_dir: str,
    id: int,
    modulo: int = 10000
) -> List[str]:
    """
    Create a tar containing the files (images/videos) whose post IDs fall
    in the range [id * modulo, (id + 1) * modulo).

    Args:
        files (List[str]): List of file paths (images/videos).
        output_dir (str): Directory to store the tar files.
        id (int): Bucket index that determines the range of post IDs to include.
        modulo (int): Constant range of IDs per tar file (default: 10,000).

    Returns:
        List[str]: List of created tar file paths.
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    # Determine ID range
    range_start = id * modulo
    range_end = (id + 1) * modulo

    # Filter files by ID range
    files_in_range = []
    for file_path in files:
        filename = os.path.basename(file_path)
        try:
            post_id = int(filename.split('_')[1].split('.')[0])
        except (IndexError, ValueError):
            print(f"Skipping file with invalid format: {filename}")
            continue

        if range_start <= post_id < range_end:
            files_in_range.append(file_path)

    if not files_in_range:
        print(f"No files found in range {range_start}-{range_end}.")
        return []

    tar_file_path = output_dir / f"{id}.tar"

    # Create the tar (uncompressed; members stored flat under their basenames)
    with tarfile.open(tar_file_path, "w") as tar:
        for file_path in files_in_range:
            tar.add(file_path, arcname=os.path.basename(file_path))

    print(f"Created tar: {tar_file_path} with {len(files_in_range)} files.")

    return [str(tar_file_path)]


# Example: media_files holds paths like ".../sankaku_16465.jpg", ".../sankaku_20000.mp4"
media_files = video_files + image_files

# See how many tars we can build and the highest post ID:
tar_count, last_id = determine_tar_info(media_files)
print("We can make", tar_count, "full tars, last post id is", last_id)
```

Output:

```
We can make 27 full tars, last post id is 273264
```
```python
# Create the tar for the first batch (id=0):
tars_created = generate_tar_from_files(media_files, output_dir="output", id=0)
print(tars_created)
```

Output:

```
Created tar: output/0.tar with 1827 files.
['output/0.tar']
```
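As a spot check on the archive just written (assuming `output/0.tar` from the cell above is still present), the members should be stored flat under their `sankaku_{id}.{ext}` basenames:

```python
import tarfile

with tarfile.open("output/0.tar") as tar:
    names = tar.getnames()

print(len(names), names[:3])  # expect 1827 flat sankaku_* member names
```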
Make indexes:
```python
from hfutils.index.make import tar_create_index

# Create an index for the generated tar
tar_create_index(src_tar_file="/rmt/yada/dev/sakuga-scraper/tars/0.tar")
```

Output:

```
Indexing tar file '/rmt/yada/dev/sakuga-scraper/tars/0.tar' ...
'/rmt/yada/dev/sakuga-scraper/tars/0.json'
```
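The index layout can be inspected directly. This peek (assuming the `0.json` written above) touches only the fields the verification cell below actually reads, namely the top-level `filesize` and each member's `offset`, `size`, and `sha256`:

```python
import json

with open("/rmt/yada/dev/sakuga-scraper/tars/0.json", "r", encoding="utf-8") as f:
    index = json.load(f)

print(index["filesize"])                       # total tar size in bytes
name, info = next(iter(index["files"].items()))
print(name, info["offset"], info["size"], info["sha256"][:12])
```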
Testing partial tar reads:
```python
import hashlib
import json
import os

def test_local_partial_read(tar_path: str, index_json_path: str):
    """
    Perform local partial reads of an uncompressed tar file to verify that
    offsets and sizes match the metadata in the JSON index.

    :param tar_path: Path to the tar file.
    :param index_json_path: Path to the JSON index created by tar_create_index.
    """
    with open(index_json_path, 'r', encoding='utf-8') as jf:
        index_data = json.load(jf)

    # Sanity check: compare tar size
    reported_size = index_data.get("filesize", None)
    actual_size = os.path.getsize(tar_path)
    if reported_size and reported_size != actual_size:
        raise ValueError(f"Tar size mismatch! JSON says {reported_size} bytes, actual is {actual_size} bytes.")

    with open(tar_path, "rb") as tar_f:
        for fname, info in index_data["files"].items():
            offset = info["offset"]
            size = info["size"]
            expected_sha256 = info["sha256"]

            # Seek to offset and read exactly `size` bytes
            tar_f.seek(offset, 0)
            data = tar_f.read(size)
            if len(data) != size:
                raise ValueError(f"Read {len(data)} bytes instead of {size} for '{fname}'.")

            # Compute SHA-256 and compare
            actual_sha256 = hashlib.sha256(data).hexdigest()
            if actual_sha256 != expected_sha256:
                raise ValueError(f"SHA mismatch for '{fname}'. Expected {expected_sha256}, got {actual_sha256}.")

    print("All files verified successfully via local partial-read checks.")


test_local_partial_read("../tars/0.tar", "../tars/0.json")
```

Output:

```
All files verified successfully via local partial-read checks.
```
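Because the tar is uncompressed, every member's bytes sit contiguously at a fixed offset, which is what makes these `seek`/`read` slices (and, once uploaded, HTTP range requests against the hosted tar) work. A minimal sketch of recovering a single member from the index alone, assuming the same `../tars/0.tar` and `../tars/0.json` pair:

```python
import json

with open("../tars/0.json", "r", encoding="utf-8") as f:
    files_index = json.load(f)["files"]

# Grab the first indexed member and slice it straight out of the tar
name, info = next(iter(files_index.items()))
with open("../tars/0.tar", "rb") as tar_f:
    tar_f.seek(info["offset"])
    payload = tar_f.read(info["size"])

with open(name, "wb") as out:  # writes e.g. a sankaku_*.jpg into the cwd
    out.write(payload)
```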
Tarring the entire set:
```python
from tqdm.auto import tqdm

# Since we already created the first tar, we can start from 1
for i in tqdm(range(1, tar_count)):
    tars_created = generate_tar_from_files(media_files, output_dir="../tars", id=i)
    print(tars_created)
    tar_create_index(src_tar_file=tars_created[0])

    test_local_partial_read(tars_created[0], f"../tars/{i}.json")
```

Output:

```
Created tar: ../tars/1.tar with 3011 files.
['../tars/1.tar']
Indexing tar file '../tars/1.tar' ...
All files verified successfully via local partial-read checks.
Created tar: ../tars/2.tar with 4460 files.
['../tars/2.tar']
Indexing tar file '../tars/2.tar' ...
All files verified successfully via local partial-read checks.
```
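The notebook stops at tarring and indexing; the upload itself is not shown. A hedged sketch of how the tars and their indexes could be pushed with `huggingface_hub` (the repo ID here is hypothetical, not from the notebook):

```python
from pathlib import Path
from huggingface_hub import HfApi

api = HfApi()
for path in sorted(Path("../tars").glob("*")):  # 0.tar, 0.json, 1.tar, ...
    api.upload_file(
        path_or_fileobj=str(path),
        path_in_repo=f"tars/{path.name}",
        repo_id="user/sakugabooru-posts",  # hypothetical dataset repo
        repo_type="dataset",
    )
```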
Notebook metadata: kernel `base` (Python 3.10.13), nbformat 4.