GoodEnough committed on
Commit
3691021
·
verified ·
1 Parent(s): bad09e8

Upload folder using huggingface_hub

Browse files
t2i_dtm_xl/toy_dataset/bucket_categorization.json ADDED
The diff for this file is too large to render. See raw diff
 
t2i_dtm_xl/toy_dataset/bucket_sampler.json ADDED
The diff for this file is too large to render. See raw diff
 
t2i_dtm_xl/toy_dataset/data_info.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
t2i_dtm_xl/toy_dataset/dataset.tar.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4938a18061bc7161e7dfea3e93923d2cf54057e091a1749688b3f1d6a31591ef
3
+ size 2562182182
t2i_dtm_xl/toy_dataset/get_sampler.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import math
import json


def build_sampler_meta(bucket_data, base_batch_size=16,
                       base_context_length=1024 * 1024, max_batch_size=128):
    """Group per-bucket sample indices into fixed-size batches.

    Each key of ``bucket_data`` is a resolution string ``"<height>x<width>"``
    and each value is a list of dataset indices belonging to that bucket.
    The batch size per bucket scales inversely with pixel count so every
    batch carries a roughly constant pixel budget
    (``base_batch_size * base_context_length`` pixels).

    Args:
        bucket_data: mapping of ``"HxW"`` resolution keys to index lists.
        base_batch_size: batch size at the reference resolution.
        base_context_length: reference pixel count (default 1024*1024).
        max_batch_size: upper clamp on any bucket's batch size.

    Returns:
        (total_samples, sampler_meta) where ``sampler_meta`` is a list of
        full batches (lists of indices) and ``total_samples`` is the number
        of indices covered. Trailing partial batches are dropped, matching
        the original script's behavior.
    """
    total_samples = 0
    sampler_meta = []
    for bucket, indices in bucket_data.items():
        height_s, width_s = bucket.split('x')
        height, width = int(height_s), int(width_s)
        # Inverse-scale by pixel count; clamp to [1, max_batch_size].
        # The lower clamp fixes a latent ZeroDivisionError: round() could
        # return 0 for buckets larger than ~2x the reference pixel count.
        batch_size = round(base_batch_size * base_context_length / (height * width))
        batch_size = max(1, min(batch_size, max_batch_size))
        # Integer division yields exactly the number of *full* batches.
        # (The original used round() and then discarded the partial slice
        # with a length check; the output is identical.)
        num_batch = len(indices) // batch_size
        for i in range(num_batch):
            current_indices = indices[i * batch_size:(i + 1) * batch_size]
            total_samples += batch_size
            sampler_meta.append(current_indices)
    return total_samples, sampler_meta


if __name__ == '__main__':
    with open('bucket_categorization.json', 'r') as fp:
        bucket_data = json.load(fp)

    print(len(bucket_data))
    all_data, sampler_meta = build_sampler_meta(bucket_data)
    print(all_data)
    print(len(sampler_meta))
    with open('bucket_sampler.json', 'w') as fp:
        json.dump(sampler_meta, fp, indent=4)
t2i_dtm_xl/toy_dataset/unzip.sh ADDED
@@ -0,0 +1 @@
 
 
1
# Extract the gzip-compressed dataset archive into the current directory.
tar --extract --gzip --verbose --file=dataset.tar.gz