import contextlib
import heapq
import os
import pickle
import re
import struct
from dataclasses import dataclass, field
from pathlib import Path
from typing import Generator

import numpy as np
from fsspec.spec import AbstractBufferedFile

from datatrove.data import DocumentsPipeline
from datatrove.io import DataFolderLike, get_datafolder
from datatrove.pipeline.base import PipelineStep
from datatrove.pipeline.writers.disk_base import DiskWriter
from datatrove.utils.binaryio import read_tuples_from_file, seek_to_start
from datatrove.utils.hashing import HashConfig, create_hash_func
from datatrove.utils.logging import logger
from datatrove.utils.text import TextNormConfig, ngrams
from datatrove.utils.typeshelper import StatHints


_mersenne_prime = np.uint64((1 << 61) - 1)
SENTINEL = (1 << 32) - 1
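

# A minimal sketch (not used by the pipeline) of the universal-hashing permutations that
# CustomMinhashDedupSignature.parameters sets up below: each permutation maps a shingle hash x
# through (a * x + b) mod p with p = 2**61 - 1, computed in wrapping uint64 arithmetic exactly as
# in get_signature. The helper name and the concrete values here are illustrative only.
def _example_permutation_sketch() -> np.ndarray:
    gen = np.random.RandomState(1)
    a = gen.randint(1, _mersenne_prime, dtype=np.uint64, size=4)  # 4 permutations for brevity
    b = gen.randint(0, _mersenne_prime, dtype=np.uint64, size=4)
    shingles = np.array([[12345], [67890]], dtype=np.uint64)  # shape (n_shingles, 1)
    # broadcasting yields one permuted value per (shingle, permutation) pair -> shape (2, 4);
    # taking the column-wise minimum of such a matrix is what forms the minhash signature
    return (shingles * a + b) % _mersenne_prime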


# Bookkeeping shared with the cluster step: each distinct dataset index gets a compact slot in
# [0, cnt) so that a (dataset, line) pair can be packed into a single integer id.
dataset_map = {}
datasets = []
cnt = 0


def transform(dataset_idx: int, line_no: int) -> int:
    """Pack a (dataset_idx, line_no) pair into one integer: line_no * 10000 + compact dataset slot."""
    global cnt
    global dataset_map
    global datasets
    if dataset_idx not in dataset_map:
        dataset_map[dataset_idx] = cnt
        datasets.append(dataset_idx)
        cnt += 1
    return line_no * 10000 + dataset_map[dataset_idx]
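

# A minimal sketch (not used by the pipeline) of the id round trip: transform() packs the ids fed
# to the union-find in CustomMinhashDedupCluster, which later recovers the pieces with
# `node // 10000` (line number) and `datasets[node % 10000]` (original dataset index). The
# concrete dataset index below is illustrative only.
def _example_transform_roundtrip() -> tuple[int, int]:
    node = transform(dataset_idx=230600000, line_no=42)
    line_no = node // 10000
    dataset_idx = datasets[node % 10000]
    return line_no, dataset_idx  # (42, 230600000) when this is the first dataset seen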


@dataclass
class CustomMinhashConfig:
    """Configuration for MinHash deduplication

    Args:
        n_grams: n-gram size to use
        num_buckets: number of buckets to use
        num_hashs: total number of hash functions to compute per document. Must be at least
            n_grams * num_buckets, since each bucket uses n_grams of them
        seed: random seed used to generate the hash function parameters. Should be the same on all
            workers so that they all use the same parameters
        norm_config: text normalization configuration
        hash_config: hash precision/function configuration
    """

    n_grams: int = 13
    num_buckets: int = 9
    num_hashs: int = 128
    seed: int = 1

    norm_config: TextNormConfig = field(default_factory=TextNormConfig)
    hash_config: HashConfig = field(default_factory=HashConfig)

    def __str__(self):
        return f"{self.n_grams}ng_{self.num_buckets}bs_{self.hash_config}"


@dataclass(order=True)
class HashSig:
    """Hash signature for a given document in a given bucket

    Args:
        sig: tuple of hashes
        doc_id: document id
        data_index: integer encoding of the source file (dataset) the document came from
        reader_id: reader id. Used to know from which reader the next signature should be requested
    """

    sig: tuple[int, ...]
    doc_id: int

    data_index: int
    reader_id: int

    def is_from_index(self):
        return False


def read_sigs(
    file: AbstractBufferedFile,
    config: CustomMinhashConfig,
    index_file: bool = False,
    min_hash: int = 0,
    max_hash: int = _mersenne_prime,
    ensure_order: bool = True,
    lines_to_buffer: int = 5,
    reader_id: int = 0,
) -> Generator:
    """Read signatures from a file

    Args:
        file: file to read from
        config: minhash configuration (a CustomMinhashConfig object)
        index_file: whether this is an index file (records have no document id)
        min_hash: only yield signatures whose first hash is >= min_hash
        max_hash: stop once the first hash reaches max_hash
        ensure_order: assert that signatures are read in non-decreasing order
        lines_to_buffer: number of lines to read at a time
        reader_id: reader id
    """
    line_format = f"{config.n_grams}{config.hash_config.struct_format}{'I' if not index_file else ''}"
    with file as f:
        if f.size == 0:
            return
        seek_to_start(f, min_hash, line_format, config.hash_config.struct_format)
        last = None

        file_stem = Path(file.path).name.removesuffix(".minhash.sig")

        # stems of the form "CC-MAIN-YYYY-NN_PPPPP" (21 characters) are encoded as a single
        # integer: ((YY * 100) + NN) * 100000 + PPPPP
        data_index = 0
        if len(file_stem) == 21:
            dataset_year = int(file_stem[8:12])
            dataset_num = int(file_stem[13:15])
            parquet_num = int(file_stem[16:21])

            data_index = ((dataset_year % 100) * 100 + dataset_num) * 100000 + parquet_num

        for data in read_tuples_from_file(f, line_format, lines_to_buffer=lines_to_buffer):
            sigdata = data if index_file else data[:-1]
            assert sigdata[0] >= min_hash and (
                ensure_order is False or last is None or sigdata >= last
            ), f"Hash order error. {f.tell()=}, {min_hash=}, {sigdata=}, {last=}"
            if sigdata[0] >= max_hash:
                break
            last = sigdata
            yield (
                HashSig(sig=sigdata, doc_id=-1, data_index=data_index, reader_id=reader_id)
                if index_file
                else HashSig(sig=sigdata, doc_id=data[-1], data_index=data_index, reader_id=reader_id)
            )
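

# Minimal sketch (not used by the pipeline) of the on-disk record layout that stage 1 writes and
# read_sigs consumes: the bucket's n_grams hashes followed by a uint32 document index, packed
# little-endian. We assume 64-bit hashes here (struct code "Q"); with a 32-bit HashConfig the hash
# fields would use "I" instead.
def _example_sig_record_roundtrip() -> tuple[tuple[int, ...], int]:
    n_grams = 13
    record_fmt = f"<{n_grams}QI"
    hashes = tuple(range(n_grams))  # a fake single-bucket signature
    doc_idx = 7
    raw = struct.pack(record_fmt, *hashes, doc_idx)
    unpacked = struct.unpack(record_fmt, raw)
    return unpacked[:-1], unpacked[-1]  # (hashes, doc_idx)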


class CustomMinhashDedupSignature(PipelineStep):
    """Minhash Deduplication: First Pipeline Step

    Compute the minhash signature for each document and write it to disk.

    Args:
        output_folder: output folder
        config: minhash configuration (a CustomMinhashConfig object)
        naming_prefix: prefix prepended to the signature file names
    """

    type = "🫂 - DEDUP"
    name = "🎯 MinHash stage 1"

    def __init__(self, output_folder: DataFolderLike, config: CustomMinhashConfig = None, naming_prefix: str = ''):
        super().__init__()
        self.output_folder = get_datafolder(output_folder)
        self.config = config or CustomMinhashConfig()
        self.num_hashes = self.config.num_hashs
        self.naming_prefix = naming_prefix
        self._parameters = None
        self._hash_func = create_hash_func(self.config.hash_config)

    @property
    def parameters(self):
        """Minhash parameters

        Create parameters for a random bijective permutation function
        that maps a 32/64-bit hash value to another 32/64-bit hash value.
        http://en.wikipedia.org/wiki/Universal_hashing

        Note: For 64-bit hashes the upper bound of the codomain is not [0, 2**64) but [0, 2**61 - 1)
        """
        if self._parameters is None:
            gen = np.random.RandomState(self.config.seed)
            self._parameters = np.array(
                [
                    (
                        gen.randint(1, _mersenne_prime, dtype=np.uint64),
                        gen.randint(0, _mersenne_prime, dtype=np.uint64),
                    )
                    for _ in range(self.num_hashes)
                ],
                dtype=np.uint64,
            ).T
        return self._parameters

    def get_signature(self, shingles: np.ndarray) -> list[list[int]]:
        """Get the signature for a set of shingles (n-grams)

        Args:
            shingles: shingles (n-grams) numpy uint64 array of size (N, 1)

        Returns:
            list (num_buckets) of lists of integers (hashes): n_grams hashes per bucket
        """
        a, b = self.parameters
        phv = (shingles * a + b) % _mersenne_prime
        if self.config.hash_config.precision == 32:
            phv = np.bitwise_and(phv, self.config.hash_config.max)
        return [
            x.tolist()
            for x in np.split(
                np.min(phv, axis=0)[: self.config.num_buckets * self.config.n_grams], self.config.num_buckets
            )
        ]

    def get_shingles(self, text: str) -> np.ndarray:
        """Get shingles (hashed character n-grams) from a string of text

        Shingles are created by hashing overlapping character n-grams of the input text.

        Args:
            text: input text

        Returns:
            numpy array of shingles: dtype = uint64, shape = (number of n-grams in the string, 1)
        """
        return np.fromiter(
            [self._hash_func("".join(x)) for x in ngrams(text, self.config.n_grams)],
            dtype=np.uint64,
        ).reshape((-1, 1))

    def run(self, data: DocumentsPipeline, rank: int = 0, world_size: int = 1) -> DocumentsPipeline:
        buckets = [
            self.output_folder.open(f"bucket_{bi:03d}/{self.naming_prefix}_{rank:05d}.minhash.sig", mode="wb")
            for bi in range(self.config.num_buckets)
        ]
        with self.track_time():
            for doc_idx, doc in enumerate(data):
                self.stat_update(StatHints.total)

                shingles = self.get_shingles(doc.text)
                if shingles.size != 0:
                    sig = self.get_signature(shingles)
                    for bi, (bucket, bucket_sig) in enumerate(zip(buckets, sig)):
                        bucket.write(
                            struct.pack(
                                f"<{self.config.n_grams}{self.config.hash_config.struct_format}I",
                                *bucket_sig,
                                doc_idx,
                            )
                        )
            for file in buckets:
                file.close()

            logger.info("Sorting buckets...")
            for bi in range(len(buckets)):
                sigs = sorted(
                    read_sigs(
                        self.output_folder.open(
                            f"bucket_{bi:03d}/{self.naming_prefix}_{rank:05d}.minhash.sig", mode="rb"
                        ),
                        self.config,
                        ensure_order=False,
                        lines_to_buffer=-1,
                    )
                )
                with self.output_folder.open(
                    f"bucket_{bi:03d}/{self.naming_prefix}_{rank:05d}.minhash.sig", mode="wb"
                ) as fo:
                    for sig in sigs:
                        fo.write(
                            struct.pack(
                                f"<{self.config.n_grams}{self.config.hash_config.struct_format}I",
                                *sig.sig,
                                sig.doc_id,
                            )
                        )


class CustomMinhashDedupBuckets(PipelineStep):
    """Minhash Deduplication: Second Pipeline Step

    Read the sorted signature files from step 1 through one priority queue per bucket and write out
    every pair of documents that share an identical bucket signature.

    Args:
        input_folder: input folder containing the signatures from step 1
        output_folder: output folder where results (document duplicate pairs) will be saved
        config: minhash configuration (a CustomMinhashConfig object)
        lines_to_buffer: number of lines to read at a time from each signature file
    """

    type = "🫂 - DEDUP"
    name = "🎯 MinHash stage 2"

    def __init__(
        self,
        input_folder: DataFolderLike,
        output_folder: DataFolderLike,
        config: CustomMinhashConfig = None,
        lines_to_buffer: int = 5,
    ):
        super().__init__()
        self.input_folder = get_datafolder(input_folder)
        self.output_folder = get_datafolder(output_folder)
        self.config = config or CustomMinhashConfig()
        self.lines_to_buffer = lines_to_buffer

    def get_worker_hash_range(self, sig_files, rank, world_size):
        workers_per_bucket = world_size // self.config.num_buckets
        bucket, bucket_worker = divmod(rank, workers_per_bucket)
        hash_min, hash_max = (
            0,
            _mersenne_prime if self.config.hash_config.precision == 64 else self.config.hash_config.max,
        )
        if workers_per_bucket > 1 and len(sig_files):
            # sample the first file to pick hash boundaries that split the bucket between workers
            with self.input_folder.open(sig_files[0], mode="rb") as f:
                line_size = struct.calcsize(f"{self.config.n_grams}{self.config.hash_config.struct_format}I")
                L, rem = divmod(f.size, line_size)
                assert rem == 0, "file size not divisible by line size"
                assert L >= workers_per_bucket, f"tried to use {workers_per_bucket=} but there are only {L} lines"
                if bucket_worker > 0:
                    # not the first worker in the bucket: read our lower boundary
                    f.seek(line_size * (L // workers_per_bucket) * bucket_worker, os.SEEK_SET)
                    hash_min = struct.unpack(
                        self.config.hash_config.struct_format,
                        f.read(struct.calcsize(self.config.hash_config.struct_format)),
                    )[0]
                if bucket_worker + 1 < workers_per_bucket:
                    # not the last worker in the bucket: read our upper boundary
                    f.seek(line_size * (L // workers_per_bucket) * (bucket_worker + 1), os.SEEK_SET)
                    hash_max = struct.unpack(
                        self.config.hash_config.struct_format,
                        f.read(struct.calcsize(self.config.hash_config.struct_format)),
                    )[0]
        return hash_min, hash_max

    def run(self, data: DocumentsPipeline = None, rank: int = 0, world_size: int = 1):
        assert data is None, "You should not use an input block before MinhashDedupBuckets"
        assert (world_size % self.config.num_buckets) == 0, "Number of tasks must be divisible by num_buckets"
        workers_per_bucket = world_size // self.config.num_buckets
        bucket, bucket_worker = divmod(rank, workers_per_bucket)

        with self.track_time():
            sig_files = self.input_folder.list_files(subdirectory=f"bucket_{bucket:03d}")
            hash_min, hash_max = self.get_worker_hash_range(sig_files, rank, world_size)

            logger.info(
                f"Running worker {bucket_worker + 1}/{workers_per_bucket} on bucket {bucket:03d}. "
                f"Hash range: {[hash_min, hash_max]}"
            )

            sig_readers = [
                read_sigs(
                    file,
                    self.config,
                    min_hash=hash_min,
                    max_hash=hash_max,
                    lines_to_buffer=self.lines_to_buffer,
                    reader_id=file_i,
                )
                for file_i, file in enumerate(self.input_folder.open_files(sig_files, mode="rb"))
            ]

            pq = [x for x in [next(sig_reader, None) for sig_reader in sig_readers] if x is not None]
            heapq.heapify(pq)
            logger.info("Finished initializing signatures priority queue.")

            with self.output_folder.open(f"{bucket:05d}_{bucket_worker:02d}.dups", mode="wb") as out_f:
                last: HashSig | None = None
                while pq:
                    v: HashSig = heapq.heappop(pq)
                    assert last is None or v >= last, f"Sig queue sort error. {v=} < {last=}"
                    if last and last.sig == v.sig:
                        # write (data_index, doc_id) for both members of the duplicate pair
                        out_f.write(struct.pack("<4I", last.data_index, last.doc_id, v.data_index, v.doc_id))
                        self.stat_update("total_matches")
                    last = v
                    next_sig = next(sig_readers[v.reader_id], None)
                    if next_sig:
                        assert next_sig >= v, f"Next sig sort error. {next_sig=} < {v=}"
                        heapq.heappush(pq, next_sig)
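

# Minimal sketch (not used by the pipeline) of one ".dups" record written above: four
# little-endian uint32 values (data_index_a, doc_id_a, data_index_b, doc_id_b), exactly what
# CustomMinhashDedupCluster reads back with read_tuples_from_file(dupf, "4I"). The values are
# illustrative only.
def _example_dups_record() -> tuple[int, int, int, int]:
    record = struct.pack("<4I", 230600000, 12, 230600001, 99)
    return struct.unpack("<4I", record)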


class CustomMinhashDedupCluster(PipelineStep):
    """Minhash Deduplication: Third Pipeline Step

    Cluster the documents using the previously found duplicate pairs. If A-B and B-C are duplicate
    pairs, we end up with an A-B-C cluster. Only one document per cluster is kept after filtering.
    """

    type = "🫂 - DEDUP"
    name = "🎯 MinHash stage 3"

    def __init__(
        self,
        input_folder: DataFolderLike,
        output_folder: DataFolderLike,
        config: CustomMinhashConfig = None,
        ignore_index_matches: bool = False,
        lines_to_buffer: int = 5,
    ):
        super().__init__()
        self.input_folder = get_datafolder(input_folder)
        self.output_folder = get_datafolder(output_folder)
        self.config = config or CustomMinhashConfig()
        self.ignore_index_matches = ignore_index_matches
        self.lines_to_buffer = lines_to_buffer

    def run(self, data: DocumentsPipeline = None, _: int = 0, world_size: int = 1):
        global datasets
        global dataset_map

        dup_files = self.input_folder.list_files(glob_pattern="*.dups")
        assert (
            len(dup_files) % self.config.num_buckets
        ) == 0, "Number of .dups files should be divisible by number of buckets"
        assert world_size == 1, "World size must be 1 for clustering"
        # union-find over the composite ids produced by transform(): sized for up to 1,500,000 lines
        # per source file and 10,000 distinct source files (the uint64 array alone is ~120 GB)
        union_set = np.arange(0, 1_500_000 * 10_000, dtype=np.uint64)
        exists = np.zeros(1_500_000 * 10_000, dtype=bool)

        max_no = 0

        def parent(x):
            exists[x] = 1
            if union_set[x] == x:
                return x

            union_set[x] = parent(union_set[x])  # path compression
            return union_set[x]

        with self.track_time():
            for dup_file in dup_files:
                with self.input_folder.open(dup_file, "rb") as dupf:
                    logger.info(f"Processing {dup_file}")
                    for f1, d1, f2, d2 in read_tuples_from_file(dupf, "4I", lines_to_buffer=self.lines_to_buffer):
                        a, b = transform(f1, d1), transform(f2, d2)
                        if a > max_no:
                            max_no = a
                        if b > max_no:
                            max_no = b
                        union_set[parent(b)] = parent(a)

            logger.info("Outputting")
            with self.output_folder.get_output_file_manager(mode="wb") as output_mg:
                for node in range(max_no + 1):
                    if exists[node]:
                        self.stat_update("duplicates")
                        p = parent(node)
                        if node != p:
                            # every non-root member of a cluster is written to the ".remove" file of
                            # the parquet file it came from (the cluster root is kept)
                            dataset_idx = datasets[node % 10000]
                            dataset_year = dataset_idx // 10000000
                            dataset_num = (dataset_idx // 100000) % 100
                            parquet_num = dataset_idx % 100000
                            output_mg.write(
                                f"CC-MAIN-20{dataset_year:02d}-{dataset_num:02d}_{parquet_num:05d}.remove",
                                struct.pack("<I", node // 10000),
                            )
                            self.stat_update("to_remove")
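

# The recursive parent() above compresses paths but can hit Python's recursion limit on very deep
# parent chains. A minimal, behaviourally equivalent iterative sketch (not used by the pipeline):
def _example_iterative_find(union_set: np.ndarray, exists: np.ndarray, x: int) -> int:
    path = []
    while union_set[x] != x:
        exists[x] = 1
        path.append(x)
        x = int(union_set[x])
    exists[x] = 1
    for node in path:  # path compression: point every visited node directly at the root
        union_set[node] = x
    return x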


class CustomMinhashDedupFilter(PipelineStep):
    """Minhash Deduplication: Fourth (and final) Pipeline Step

    Drop the documents flagged for removal in stage 3 (keeping one document per duplicate cluster)
    and dump the remaining doc ids and signature hashes to .pkl files.
    """

    type = "🫂 - DEDUP"
    name = "🎯 MinHash stage 4"

    def __init__(
        self,
        remove_id_input_folder: DataFolderLike,
        sig_input_folder: DataFolderLike,
        exclusion_writer: DiskWriter = None,
        lines_to_buffer: int = 5,
        naming_prefix: str = '',
        config: CustomMinhashConfig = None,
    ):
        super().__init__()
        self.remove_id_folder = get_datafolder(remove_id_input_folder)
        self.sig_folder = get_datafolder(sig_input_folder)
        self.exclusion_writer = exclusion_writer
        self.lines_to_buffer = lines_to_buffer
        self.naming_prefix = naming_prefix
        self.config = config or CustomMinhashConfig()

    def run(self, data: DocumentsPipeline, rank: int = 0, world_size: int = 1):
        files = self.remove_id_folder.list_files(glob_pattern=f"{self.naming_prefix}*{rank:05d}.remove")
        if not files:
            # no ".remove" file for this rank: nothing was flagged as a duplicate, keep every document
            logger.info(f"Found 0 files by pattern {self.naming_prefix}*{rank:05d}.remove, maybe no dups")
            for bucket in range(self.config.num_buckets):
                for sig_file_name in self.sig_folder.list_files(
                    glob_pattern=f"bucket_{bucket:03d}/{self.naming_prefix}*{rank:05d}.minhash.sig"
                ):
                    file_name = Path(sig_file_name).name.removesuffix(".minhash.sig")
                    save_docs = []
                    for sig in read_sigs(
                        self.sig_folder.open(f"bucket_{bucket:03d}/{file_name}.minhash.sig", "rb"),
                        self.config,
                        ensure_order=False,
                    ):
                        self.stat_update(StatHints.total)
                        save_doc = {}
                        save_doc['doc_id'] = file_name + ':' + str(sig.doc_id)
                        save_doc['hash'] = bytes(np.array(sig.sig).astype(np.uint64).byteswap().data)
                        save_docs.append(save_doc)
                        self.stat_update(StatHints.forwarded)
                    with self.sig_folder.open(f"bucket_{bucket:03d}/{file_name}.pkl", "wb") as out_file:
                        pickle.dump(save_docs, out_file)

            return

        single_int = struct.Struct("<I")
        for file in files:
            logger.info(f"Processing {file}")

            remove_id_file = self.remove_id_folder.open(file, "rb")
            logger.info(remove_id_file)
            remove_ids = set()
            while True:
                chunk = remove_id_file.read(single_int.size)
                if not chunk:
                    break
                # unpack() returns a 1-tuple; store the bare int so the doc_id lookup below matches
                remove_ids.add(single_int.unpack(chunk)[0])
            remove_id_file.close()

            file_name = Path(file).name.removesuffix(".remove")
            for bucket in range(self.config.num_buckets):
                save_docs = []
                for sig in read_sigs(
                    self.sig_folder.open(f"bucket_{bucket:03d}/{file_name}.minhash.sig", "rb"),
                    self.config,
                    ensure_order=False,
                ):
                    self.stat_update(StatHints.total)
                    if sig.doc_id in remove_ids:
                        continue
                    save_doc = {}
                    save_doc['doc_id'] = file_name + ':' + str(sig.doc_id)
                    save_doc['hash'] = bytes(np.array(sig.sig).astype(np.uint64).byteswap().data)
                    save_docs.append(save_doc)
                    self.stat_update(StatHints.forwarded)
                with self.sig_folder.open(f"bucket_{bucket:03d}/{file_name}.pkl", "wb") as out_file:
                    pickle.dump(save_docs, out_file)
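

# Minimal sketch of how the four stages could be chained with datatrove's LocalPipelineExecutor.
# The ParquetReader input, folder paths, naming prefix and task counts are illustrative assumptions,
# not part of this module; adapt them to your own data layout.
def _example_pipeline_sketch():
    from datatrove.executor import LocalPipelineExecutor
    from datatrove.pipeline.readers import ParquetReader

    config = CustomMinhashConfig()
    sigs, dups, remove_ids = "minhash/sigs", "minhash/dups", "minhash/remove_ids"
    # a 21-character stem such as "CC-MAIN-2023-06_00000" lets read_sigs recover a data_index
    prefix = "CC-MAIN-2023-06"

    LocalPipelineExecutor(
        pipeline=[
            ParquetReader("input_data"),
            CustomMinhashDedupSignature(output_folder=sigs, config=config, naming_prefix=prefix),
        ],
        tasks=4,
    ).run()
    LocalPipelineExecutor(
        pipeline=[CustomMinhashDedupBuckets(input_folder=sigs, output_folder=dups, config=config)],
        tasks=config.num_buckets,  # stage 2 requires the task count to be a multiple of num_buckets
    ).run()
    LocalPipelineExecutor(
        pipeline=[CustomMinhashDedupCluster(input_folder=dups, output_folder=remove_ids, config=config)],
        tasks=1,  # clustering must run as a single task
    ).run()
    LocalPipelineExecutor(
        pipeline=[
            CustomMinhashDedupFilter(
                remove_id_input_folder=remove_ids,
                sig_input_folder=sigs,
                config=config,
                naming_prefix=prefix,
            )
        ],
        tasks=4,
    ).run()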