from typing import Optional
import datasets
import io
import PIL.Image
import PIL.PngImagePlugin
import os
import hashlib
import warnings
ASSEMBLED_COLUMNS = (
    'sample_id',
    'dataset_name',
    'task_name',
    'query',
    'annotations',
    'image',
    'query_info',
    'annotations_info',
    'image_info',
    'image_sha256'
)


def _hash_bytes(b: bytes) -> str:
    m = hashlib.sha256()
    m.update(b)
    return m.hexdigest()

def get_bigdocs_75m(
    formal_name: str,
    user_local_path: Optional[str] = None,
    load_from_cache_file: Optional[bool] = None,
    num_proc: Optional[int] = None,
    raise_on_missing: Optional[bool] = None,
    skip_bad_sha256: Optional[bool] = None,
    bigdocs_load_dataset_kwargs: Optional[dict] = None
) -> datasets.DatasetDict:
"""
Get a subset of BigDocs-7.5M
Some parts of BigDocs-7.5M are distributed without their "image" column,
and instead have an "img_id" column. The present function substitutes
such images back in.
For the following `formal_name`, the the user is responsible to download
the specified dataset and specify its location through `user_local_path`.
- COCOtext: http://images.cocodataset.org/zips/train2014.zip
- pubtables-1m: https://www.microsoft.com/en-us/research/publication/pubtables-1m
- TextOCR: https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip
Args:
formal_name (`DatasetDict`): The BigDocs-7.5M dataset to augment with local images.
user_local_path (`Optional[str]`, defaults to `None`): The local path containing the images to be linked.
load_from_cache_file (`Optional[bool], defaults to `None`): Passed to `map`, `filter` and the likes.
num_proc (`Optional[int], defaults to `None`): Passed to `map`, `filter` and the likes.
raise_on_missing (`Optional[bool]`, defaults to `None`):
Determines what to do when there is an error loading an image.
- `True`: raise an error.
- `None`: print a warning and skip the sample (default).
- `False`: silently skip the sample.
use_bad_sha256 (`Optional[bool], defaults to `None`):
Determines what to do when the sha256 integrity test fails.
- `True`: ignore the sha256 integrity test.
- `None`: print a warning and skip samples with bad sha256 (default).
- `False`: silently skip entries with bad sha256.
load_dataset_kwargs (`Optional[dict]`, defaults to `None`): Arguments passed to datasets.load_dataset .
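
    Example (a minimal, illustrative call; the path below is hypothetical and must
    be a directory such that `os.path.join(user_local_path, img_id)` resolves to an
    image file):

        >>> ds = get_bigdocs_75m("TextOCR", user_local_path="/data/textocr/train_images")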
"""
    if bigdocs_load_dataset_kwargs is None:
        bigdocs_load_dataset_kwargs = {}
    unprocessed = datasets.load_dataset("ServiceNow/BigDocs-7.5M", formal_name, **bigdocs_load_dataset_kwargs)

    def on_disk_processor(sample):
        img_path = os.path.join(user_local_path, sample['img_id'])
        # Load the image
        try:
            image = PIL.Image.open(img_path)
        except Exception as e:
            if raise_on_missing:
                raise RuntimeError(f"Error loading image at {img_path}\n{e}")
            if raise_on_missing is None:
                warnings.warn(f"Skipping due to error loading image {img_path}", RuntimeWarning)
            image = None  # Sample will be filtered out
        if image is not None:
            # Place into `buffer` using PNG image format
            buffer = io.BytesIO()
            image.save(buffer, "png")
            # Reload the image with guaranteed PNG format
            image = PIL.Image.open(buffer)
            # Check sha256
            if not skip_bad_sha256:
                sha256 = _hash_bytes(buffer.getvalue())
                if sha256 != sample["image_sha256"]:
                    image = None  # Sample will be filtered out
                    if skip_bad_sha256 is None:
                        warnings.warn(f"Skipping due to bad sha256 for {img_path}", RuntimeWarning)
        return {"image": image}

    # Get the correct processor
    try:
        processor = {
            "COCOtext": on_disk_processor,
            "pubtables-1m": on_disk_processor,
            "TextOCR": on_disk_processor,
        }[formal_name]
    except KeyError:
        raise ValueError(f"Unknown formal_name: {formal_name}")
    if processor is on_disk_processor:
        assert user_local_path is not None, f"user_local_path is mandatory for formal_name={formal_name}"
    if processor is None:
        processed = unprocessed
    else:
        processed = unprocessed.map(
            processor,
            remove_columns="img_id",
            load_from_cache_file=load_from_cache_file,
            num_proc=num_proc
        )
        # Drop missing images.
        if not raise_on_missing:
            processed = processed.filter((lambda image: image is not None), input_columns="image", num_proc=num_proc)
    # Column order
    processed = processed.select_columns(list(ASSEMBLED_COLUMNS))
    return processed
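

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loader itself. The local path is a
    # placeholder: it must point at the user's extraction of
    # http://images.cocodataset.org/zips/train2014.zip, so that
    # os.path.join(user_local_path, sample["img_id"]) resolves to each image file.
    dataset_dict = get_bigdocs_75m(
        "COCOtext",
        user_local_path="/data/coco/train2014",  # placeholder path
        num_proc=4,
    )
    # Under the default settings above, samples whose image could not be loaded
    # or whose sha256 check failed have been skipped, with a warning for each.
    print(dataset_dict)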