Upload get_sampler.py with huggingface_hub
get_sampler.py +31 -0
get_sampler.py
ADDED
@@ -0,0 +1,31 @@
+import math
+import json
+
+
+with open('bucket_categorization.json', 'r') as fp:
+    bucket_data = json.load(fp)
+
+
+print(len(bucket_data))
+base_batch_size = 16
+base_context_length = 1024 * 1024
+
+all_data = 0
+sampler_meta = []
+for bucket, indices in bucket_data.items():
+    resolution = bucket.split('x')
+    height, width = int(resolution[0]), int(resolution[1])
+    batch_size = round(base_batch_size*base_context_length/(height*width))
+    batch_size = min(batch_size, 128)
+    num_batch = round(len(indices) / batch_size)
+    for i in range(num_batch):
+        current_indices = indices[i*batch_size: (i+1)*batch_size]
+        if len(current_indices) == batch_size:
+            all_data += batch_size
+            sampler_meta.append(current_indices)
+
+print(all_data)
+print(len(sampler_meta))
+with open('bucket_sampler.json', 'w') as fp:
+    json.dump(sampler_meta, fp, indent=4)
+
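The uploaded script packs each resolution bucket into fixed-size batches: the per-bucket batch size targets a roughly constant pixel budget of base_batch_size * base_context_length = 16 * 1024^2, so a 1024x1024 bucket gets batches of 16 while a 512x512 bucket gets 64, capped at 128, and any trailing partial batch is dropped by the equality check. Below is a minimal sketch of how the resulting bucket_sampler.json could be consumed during training, assuming a PyTorch DataLoader driven by a batch sampler; the BucketBatchSampler class, its keyword arguments, and the shuffling behavior are illustrative assumptions, not part of the uploaded file.

import json
import random

from torch.utils.data import Sampler


class BucketBatchSampler(Sampler):
    """Replays the precomputed per-bucket batches from bucket_sampler.json,
    so every batch only contains samples from a single resolution bucket."""

    def __init__(self, meta_path='bucket_sampler.json', shuffle=True, seed=0):
        with open(meta_path, 'r') as fp:
            self.batches = json.load(fp)  # list of fixed-size index lists
        self.shuffle = shuffle
        self.seed = seed
        self.epoch = 0

    def set_epoch(self, epoch):
        # Optional hook so the batch order differs across epochs.
        self.epoch = epoch

    def __iter__(self):
        order = list(range(len(self.batches)))
        if self.shuffle:
            random.Random(self.seed + self.epoch).shuffle(order)
        for i in order:
            yield self.batches[i]

    def __len__(self):
        return len(self.batches)


# Usage sketch (the dataset is assumed to be indexed by the same integer ids
# that appear in bucket_categorization.json):
# loader = DataLoader(dataset, batch_sampler=BucketBatchSampler(), num_workers=4)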