slz1 committed
Commit bf5834d · verified · 1 parent: 59b592b

Add files using upload-large-folder tool

Files changed (50)
  1. .DS_Store +0 -0
  2. .gitignore +162 -0
  3. LICENSE +201 -0
  4. README.md +362 -0
  5. assets/comparison.png +3 -0
  6. assets/logo.jpeg +3 -0
  7. assets/vis.png +3 -0
  8. autoregressive/models/README.md +7 -0
  9. autoregressive/models/dinov2_adapter.py +36 -0
  10. autoregressive/models/generate.py +204 -0
  11. autoregressive/models/gpt.py +550 -0
  12. autoregressive/models/gpt_t2i.py +569 -0
  13. autoregressive/models/vit_adapter.py +26 -0
  14. autoregressive/sample/sample_c2i_ddp.py +188 -0
  15. autoregressive/sample/sample_t2i_ddp.py +229 -0
  16. autoregressive/serve/llm.py +267 -0
  17. create_npz.py +30 -0
  18. hfd.sh +328 -0
  19. requirements.txt +22 -0
  20. scripts/tokenizer/train_vq_finetune.sh +14 -0
  21. scripts/tokenizer/train_vq_finetune_continue.sh +15 -0
  22. scripts/tokenizer/val.sh +8 -0
  23. test_dataset_t2icontrol.py +55 -0
  24. tokenizer/consistencydecoder/README.md +14 -0
  25. tokenizer/consistencydecoder/reconstruction_cd_ddp.py +208 -0
  26. tokenizer/tokenizer_image/discriminator.py +255 -0
  27. tokenizer/tokenizer_image/discriminator_patchgan.py +152 -0
  28. tokenizer/tokenizer_image/discriminator_stylegan.py +101 -0
  29. tokenizer/tokenizer_image/lpips.py +164 -0
  30. tokenizer/tokenizer_image/reconstruction_vq_ddp.py +207 -0
  31. tokenizer/tokenizer_image/vq_demo.py +84 -0
  32. tokenizer/tokenizer_image/vq_loss.py +168 -0
  33. tokenizer/tokenizer_image/vq_model.py +425 -0
  34. tokenizer/vae/README.md +14 -0
  35. tokenizer/vae/reconstruction_vae_ddp.py +210 -0
  36. tokenizer/vae/sd_vae_demo.py +57 -0
  37. tokenizer/validation/val_ddp.py +165 -0
  38. tools/openimage_json.py +75 -0
  39. tools/push_vae_to_hf.py +48 -0
  40. utils/__init__.py +0 -0
  41. utils/__pycache__/__init__.cpython-310.pyc +0 -0
  42. utils/__pycache__/__init__.cpython-38.pyc +0 -0
  43. utils/__pycache__/distributed.cpython-310.pyc +0 -0
  44. utils/__pycache__/distributed.cpython-38.pyc +0 -0
  45. utils/deepspeed.py +87 -0
  46. utils/distributed.py +58 -0
  47. utils/drop_path.py +36 -0
  48. utils/ema.py +22 -0
  49. utils/logger.py +19 -0
  50. utils/video.py +116 -0
.DS_Store ADDED
Binary file (6.15 kB).
 
.gitignore ADDED
@@ -0,0 +1,162 @@
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # pdm
105
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106
+ #pdm.lock
107
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108
+ # in version control.
109
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
110
+ .pdm.toml
111
+ .pdm-python
112
+ .pdm-build/
113
+
114
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115
+ __pypackages__/
116
+
117
+ # Celery stuff
118
+ celerybeat-schedule
119
+ celerybeat.pid
120
+
121
+ # SageMath parsed files
122
+ *.sage.py
123
+
124
+ # Environments
125
+ .env
126
+ .venv
127
+ env/
128
+ venv/
129
+ ENV/
130
+ env.bak/
131
+ venv.bak/
132
+
133
+ # Spyder project settings
134
+ .spyderproject
135
+ .spyproject
136
+
137
+ # Rope project settings
138
+ .ropeproject
139
+
140
+ # mkdocs documentation
141
+ /site
142
+
143
+ # mypy
144
+ .mypy_cache/
145
+ .dmypy.json
146
+ dmypy.json
147
+
148
+ # Pyre type checker
149
+ .pyre/
150
+
151
+ # pytype static type analyzer
152
+ .pytype/
153
+
154
+ # Cython debug symbols
155
+ cython_debug/
156
+
157
+ # PyCharm
158
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
161
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162
+ #.idea/
LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
README.md ADDED
@@ -0,0 +1,362 @@
1
+ <div align="center">
2
+ <img src="./assets/logo.jpeg" width="20%">
3
+ <h1> ControlAR </h1>
4
+ <h3> Controllable Image Generation with Autoregressive Models </h3>
5
+
6
+ Zongming Li<sup>1,\*</sup>, [Tianheng Cheng](https://scholar.google.com/citations?user=PH8rJHYAAAAJ&hl=zh-CN)<sup>1,\*</sup>, [Shoufa Chen](https://shoufachen.com/)<sup>2</sup>, [Peize Sun](https://peizesun.github.io/)<sup>2</sup>, Haocheng Shen<sup>3</sup>, Longjin Ran<sup>3</sup>, Xiaoxin Chen<sup>3</sup>, [Wenyu Liu](http://eic.hust.edu.cn/professor/liuwenyu)<sup>1</sup>, [Xinggang Wang](https://xwcv.github.io/)<sup>1,📧</sup>
7
+
8
+ <sup>1</sup> Huazhong University of Science and Technology,
9
+ <sup>2</sup> The University of Hong Kong
10
+ <sup>3</sup> vivo AI Lab
11
+
12
+ <b>ICLR 2025</b>
13
+
14
+ (\* equal contribution, 📧 corresponding author)
15
+
16
+ [![arxiv paper](https://img.shields.io/badge/arXiv-Paper-red)](https://arxiv.org/abs/2410.02705)
17
+ [![demo](https://img.shields.io/badge/Demo-🤗-orange)](https://huggingface.co/spaces/wondervictor/ControlAR)
18
+ [![checkpoints](https://img.shields.io/badge/HuggingFace-🤗-green)](https://huggingface.co/wondervictor/ControlAR)
19
+
20
+ </div>
21
+
22
+
23
+ <div align="center">
24
+ <img src="./assets/vis.png">
25
+ </div>
26
+
27
+
28
+ ## News
29
+ `[2025-01-23]:` Our ControlAR has been accepted by ICLR 2025 🚀!\
30
+ `[2024-12-12]:` We introduce a control strength factor, employ a larger control encoder (DINOv2-base), and improve text alignment and generation diversity. New model weights: `depth_base.safetensors` and `edge_base.safetensors`; the latter can handle three types of edges, including Canny, HED, and Lineart.\
31
+ `[2024-10-31]:` The code and models have been released!\
32
+ `[2024-10-04]:` We have released the [technical report of ControlAR](https://arxiv.org/abs/2410.02705). Code, models, and demos are coming soon!
33
+
34
+
35
+ ## Highlights
36
+
37
+ * ControlAR explores an effective yet simple *conditional decoding* strategy for adding spatial controls to autoregressive models, e.g., [LlamaGen](https://github.com/FoundationVision/LlamaGen), from a sequence perspective.
38
+
39
+ * ControlAR supports *arbitrary-resolution* image generation with autoregressive models without hand-crafted special tokens or resolution-aware prompts.
40
+
41
+ ## TODO
42
+
43
+ - [x] release code & models.
44
+ - [x] release demo code and HuggingFace demo: [HuggingFace Spaces 🤗](https://huggingface.co/spaces/wondervictor/ControlAR)
45
+
46
+
47
+ ## Results
48
+
49
+ We provide both quantitative and qualitative comparisons with diffusion-based methods in the technical report!
50
+
51
+ <div align="center">
52
+ <img src="./assets/comparison.png">
53
+ </div>
54
+
55
+
56
+ ## Models
57
+
58
+ We have released checkpoints of text-to-image ControlAR for different controls and settings, *e.g.*, arbitrary-resolution generation.
59
+
60
+ | AR Model | Type | Control encoder | Control | Arbitrary-Resolution | Checkpoint |
61
+ | :--------| :--: | :-------------: | :-----: | :------------------: | :--------: |
62
+ | [LlamaGen-XL](https://github.com/FoundationVision/LlamaGen#-text-conditional-image-generation) | t2i | DINOv2-small | Canny Edge | ✅ | [ckpt](https://huggingface.co/wondervictor/ControlAR/blob/main/canny_MR.safetensors) |
63
+ | [LlamaGen-XL](https://github.com/FoundationVision/LlamaGen#-text-conditional-image-generation) | t2i | DINOv2-small | Depth | ✅ | [ckpt](https://huggingface.co/wondervictor/ControlAR/blob/main/depth_MR.safetensors) |
64
+ | [LlamaGen-XL](https://github.com/FoundationVision/LlamaGen#-text-conditional-image-generation) | t2i | DINOv2-small | HED Edge | ❌ | [ckpt](https://huggingface.co/wondervictor/ControlAR/blob/main/hed.safetensors) |
65
+ | [LlamaGen-XL](https://github.com/FoundationVision/LlamaGen#-text-conditional-image-generation) | t2i | DINOv2-small | Seg. Mask | ❌ | [ckpt](https://huggingface.co/wondervictor/ControlAR/blob/main/seg_cocostuff.safetensors) |
66
+ | [LlamaGen-XL](https://github.com/FoundationVision/LlamaGen#-text-conditional-image-generation) | t2i | DINOv2-base | Edge (Canny, Hed, Lineart) | ❌ | [ckpt](https://huggingface.co/wondervictor/ControlAR/blob/main/edge_base.safetensors) |
67
+ | [LlamaGen-XL](https://github.com/FoundationVision/LlamaGen#-text-conditional-image-generation) | t2i | DINOv2-base | Depth | ❌ | [ckpt](https://huggingface.co/wondervictor/ControlAR/blob/main/depth_base.safetensors) |
68
+
69
+
70
+
71
+ ## Getting Started
72
+
73
+ ### Installation
74
+
75
+ ```bash
76
+ conda create -n ControlAR python=3.10
77
+ git clone https://github.com/hustvl/ControlAR.git
78
+ cd ControlAR
79
+ pip install torch==2.1.2+cu118 --extra-index-url https://download.pytorch.org/whl/cu118
80
+ pip install -r requirements.txt
81
+ pip3 install -U openmim
82
+ mim install mmengine
83
+ mim install "mmcv==2.1.0"
84
+ pip3 install "mmsegmentation>=1.0.0"
85
+ pip3 install mmdet
86
+ git clone https://github.com/open-mmlab/mmsegmentation.git
87
+ ```
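+
+ After installation, a quick environment check (a minimal sketch, assuming the steps above completed) confirms that the CUDA build of PyTorch and the mm* packages import correctly:
+
+ ```python
+ # Environment sanity check (illustrative, not part of the repository).
+ import torch
+ import mmcv
+ import mmseg
+
+ print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())
+ print("mmcv:", mmcv.__version__, "| mmseg:", mmseg.__version__)
+ ```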
88
+
89
+ ### Pretrained Checkpoints for ControlAR
90
+
91
+ |tokenizer| text encoder |LlamaGen-B|LlamaGen-L|LlamaGen-XL|
92
+ |:-------:|:------------:|:--------:|:--------:|:---------:|
93
+ |[vq_ds16_t2i.pt](https://huggingface.co/peizesun/llamagen_t2i/resolve/main/vq_ds16_t2i.pt)|[flan-t5-xl](https://huggingface.co/google/flan-t5-xl)|[c2i_B_256.pt](https://huggingface.co/FoundationVision/LlamaGen/resolve/main/c2i_B_256.pt)|[c2i_L_256.pt](https://huggingface.co/FoundationVision/LlamaGen/resolve/main/c2i_L_256.pt)|[t2i_XL_512.pt](https://huggingface.co/peizesun/llamagen_t2i/resolve/main/t2i_XL_stage2_512.pt)|
94
+
95
+ We recommend storing them in the following structure:
96
+ ```
97
+ |---checkpoints
98
+ |---t2i
99
+ |---canny/canny_MR.safetensors
100
+ |---hed/hed.safetensors
101
+ |---depth/depth_MR.safetensors
102
+ |---seg/seg_cocostuff.safetensors
103
+ |---edge_base.safetensors
104
+ |---depth_base.safetensors
105
+ |---t5-ckpt
106
+ |---flan-t5-xl
107
+ |---config.json
108
+ |---pytorch_model-00001-of-00002.bin
109
+ |---pytorch_model-00002-of-00002.bin
110
+ |---pytorch_model.bin.index.json
111
+ |---tokenizer.json
112
+ |---vq
113
+ |---vq_ds16_c2i.pt
114
+ |---vq_ds16_t2i.pt
115
+ |---llamagen (Only necessary for training)
116
+ |---c2i_B_256.pt
117
+ |---c2i_L_256.pt
118
+ |---t2i_XL_stage2_512.pt
119
+ ```
120
+
121
+ ### Demo
122
+
123
+ Coming soon...
124
+
125
+
126
+ ### Sample & Generation
127
+
128
+ #### 1. Class-to-image generation
129
+
130
+ ```bash
131
+ python autoregressive/sample/sample_c2i.py \
132
+ --vq-ckpt checkpoints/vq/vq_ds16_c2i.pt \
133
+ --gpt-ckpt checkpoints/c2i/canny/LlamaGen-L.pt \
134
+ --gpt-model GPT-L --seed 0 --condition-type canny
135
+ ```
136
+
137
+ #### 2. Text-to-image generation
138
+
139
+ *Generate an image using an HED edge and text-to-image ControlAR:*
140
+
141
+ ```bash
142
+ python autoregressive/sample/sample_t2i.py \
143
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
144
+ --gpt-ckpt checkpoints/t2i/hed/hed.safetensors \
145
+ --gpt-model GPT-XL --image-size 512 \
146
+ --condition-type hed --seed 0 --condition-path condition/example/t2i/multigen/eye.png
147
+ ```
148
+ *Generate an image using a segmentation mask and text-to-image ControlAR:*
149
+
150
+ ```bash
151
+ python autoregressive/sample/sample_t2i.py \
152
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
153
+ --gpt-ckpt checkpoints/t2i/seg/seg_cocostuff.safetensors \
154
+ --gpt-model GPT-XL --image-size 512 \
155
+ --condition-type seg --seed 0 --condition-path condition/example/t2i/cocostuff/doll.png \
156
+ --prompt 'A stuffed animal wearing a mask and a leash, sitting on a pink blanket'
157
+ ```
158
+
159
+ #### 3. Text-to-image generation with adjustable control strength
160
+ *Generate an image using a depth map and text-to-image ControlAR:*
161
+
162
+ ```bash
163
+ python autoregressive/sample/sample_t2i.py \
164
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
165
+ --gpt-ckpt checkpoints/t2i/depth_base.safetensors \
166
+ --gpt-model GPT-XL --image-size 512 \
167
+ --condition-type depth --seed 0 --condition-path condition/example/t2i/multigen/bird.jpg \
168
+ --prompt 'A bird made of blue crystal' \
169
+ --adapter-size base \
170
+ --control-strength 0.6
171
+ ```
172
+
173
+ *Generate an image using a lineart edge and text-to-image ControlAR:*
174
+
175
+ ```bash
176
+ python autoregressive/sample/sample_t2i.py \
177
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
178
+ --gpt-ckpt checkpoints/t2i/edge_base.safetensors \
179
+ --gpt-model GPT-XL --image-size 512 \
180
+ --condition-type lineart --seed 0 --condition-path condition/example/t2i/multigen/girl.jpg \
181
+ --prompt 'A girl with blue hair' \
182
+ --adapter-size base \
183
+ --control-strength 0.6
184
+ ```
185
+
186
+ (You can replace `lineart` with `canny_base` or `hed`; a strength-sweep sketch follows below.)
187
+
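+ The effect of `--control-strength` is easiest to judge by sweeping several values and comparing the outputs. A minimal helper sketch (it simply re-runs the lineart command above with different strengths):
+
+ ```python
+ # Sweep the control strength for the lineart example (illustrative helper).
+ import subprocess
+
+ for strength in (0.4, 0.6, 0.8, 1.0):
+     subprocess.run([
+         "python", "autoregressive/sample/sample_t2i.py",
+         "--vq-ckpt", "checkpoints/vq/vq_ds16_t2i.pt",
+         "--gpt-ckpt", "checkpoints/t2i/edge_base.safetensors",
+         "--gpt-model", "GPT-XL", "--image-size", "512",
+         "--condition-type", "lineart", "--seed", "0",
+         "--condition-path", "condition/example/t2i/multigen/girl.jpg",
+         "--prompt", "A girl with blue hair",
+         "--adapter-size", "base",
+         "--control-strength", str(strength),
+     ], check=True)
+ ```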
188
+
189
+ #### 4. Arbitrary-resolution generation
190
+
191
+ ```bash
192
+ python3 autoregressive/sample/sample_t2i_MR.py --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
193
+ --gpt-ckpt checkpoints/t2i/depth_MR.safetensors --gpt-model GPT-XL --image-size 768 \
194
+ --condition-type depth --condition-path condition/example/t2i/multi_resolution/bird.jpg \
195
+ --prompt 'colorful bird' --seed 0
196
+ ```
197
+
198
+ ```bash
199
+ python3 autoregressive/sample/sample_t2i_MR.py --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
200
+ --gpt-ckpt checkpoints/t2i/canny_MR.safetensors --gpt-model GPT-XL --image-size 768 \
201
+ --condition-type canny --condition-path condition/example/t2i/multi_resolution/bird.jpg \
202
+ --prompt 'colorful bird' --seed 0
203
+ ```
204
+
205
+ ### Preparing Datasets
206
+ We provide the dataset details for evaluation and training. If you do not want to train ControlAR, just download the validation splits.
207
+
208
+ #### 1. Class-to-image
209
+ * Download [ImageNet](https://image-net.org/) and save it to `data/imagenet/data`.
210
+
211
+ #### 2. Text-to-image
212
+ * Download [ADE20K with caption](https://huggingface.co/datasets/limingcv/Captioned_ADE20K) (~7 GB) and save the `.parquet` files to `data/Captioned_ADE20K/data`.
213
+ * Download [COCOStuff with caption](https://huggingface.co/datasets/limingcv/Captioned_COCOStuff) (~62 GB) and save the `.parquet` files to `data/Captioned_COCOStuff/data`.
214
+ * Download [MultiGen-20M](https://huggingface.co/datasets/limingcv/MultiGen-20M_depth) (~1.22 TB) and save the `.parquet` files to `data/MultiGen20M/data`. A quick integrity check is sketched below.
215
+
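+ A quick way to verify a download is to open one of the `.parquet` shards (a sketch; column names vary per dataset, so just inspect the schema):
+
+ ```python
+ # Peek at a downloaded shard (illustrative; adjust the glob to your layout).
+ import glob
+ import pandas as pd
+
+ shard = sorted(glob.glob("data/Captioned_ADE20K/data/*.parquet"))[0]
+ df = pd.read_parquet(shard)
+ print(df.shape)    # rows / columns in this shard
+ print(df.columns)  # available fields (e.g. image and caption columns)
+ print(df.iloc[0])  # first record
+ ```
+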
216
+ #### 3. Preprocessing datasets
217
+ To save training time, we use the image tokenizer to pre-process the images together with their text prompts.
218
+
219
+ * ImageNet
220
+ ```bash
221
+ bash scripts/autoregressive/extract_file_imagenet.sh \
222
+ --vq-ckpt checkpoints/vq/vq_ds16_c2i.pt \
223
+ --data-path data/imagenet/data/val \
224
+ --code-path data/imagenet/val/imagenet_code_c2i_flip_ten_crop \
225
+ --ten-crop --crop-range 1.1 --image-size 256
226
+ ```
227
+ * ADE20k
228
+ ```sh
229
+ bash scripts/autoregressive/extract_file_ade.sh \
230
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
231
+ --data-path data/Captioned_ADE20K/data --code-path data/Captioned_ADE20K/val \
232
+ --ten-crop --crop-range 1.1 --image-size 512 --split validation
233
+ ```
234
+ * COCOStuff
235
+ ```bash
236
+ bash scripts/autoregressive/extract_file_cocostuff.sh \
237
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
238
+ --data-path data/Captioned_COCOStuff/data --code-path data/Captioned_COCOStuff/val \
239
+ --ten-crop --crop-range 1.1 --image-size 512 --split validation
240
+ ```
241
+ * MultiGen
242
+ ```bash
243
+ bash scripts/autoregressive/extract_file_multigen.sh \
244
+ --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
245
+ --data-path data/MultiGen20M/data --code-path data/MultiGen20M/val \
246
+ --ten-crop --crop-range 1.1 --image-size 512 --split validation
247
+ ```
248
+
249
+ ### Testing and Evaluation
250
+
251
+ #### 1. Class-to-image generation on ImageNet
252
+
253
+ ```bash
254
+ bash scripts/autoregressive/test_c2i.sh \
255
+ --vq-ckpt ./checkpoints/vq/vq_ds16_c2i.pt \
256
+ --gpt-ckpt ./checkpoints/c2i/canny/LlamaGen-L.pt \
257
+ --code-path /path/imagenet/val/imagenet_code_c2i_flip_ten_crop \
258
+ --gpt-model GPT-L --condition-type canny --get-condition-img True \
259
+ --sample-dir ./sample --save-image True
260
+ ```
261
+
262
+ ```bash
263
+ python create_npz.py --generated-images ./sample/imagenet/canny
264
+ ```
265
+ Then download the ImageNet [validation data](https://openaipublic.blob.core.windows.net/diffusion/jul-2021/ref_batches/imagenet/256/VIRTUAL_imagenet256_labeled.npz), which contains 10,000 images, or use the whole validation set as the reference data by running [val.sh](scripts/tokenizer/val.sh).
266
+
267
+ Calculate the FID score:
268
+ ```bash
269
+ python evaluations/c2i/evaluator.py /path/imagenet/val/FID/VIRTUAL_imagenet256_labeled.npz \
270
+ sample/imagenet/canny.npz
271
+ ```
272
+
273
+ #### 2. Text-to-image generation on ADE20k
274
+
275
+ Download the Mask2Former [weights](https://download.openmmlab.com/mmsegmentation/v0.5/mask2former/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640/mask2former_swin-l-in22k-384x384-pre_8xb2-160k_ade20k-640x640_20221203_235933-7120c214.pth) and save them to `evaluations/`.
276
+
277
+ Use this command to get 2000 images based on the segmentation mask:
278
+
279
+ ```bash
280
+ bash scripts/autoregressive/test_t2i.sh --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
281
+ --gpt-ckpt checkpoints/t2i/seg/seg_ade20k.pt \
282
+ --code-path data/Captioned_ADE20K/val --gpt-model GPT-XL --image-size 512 \
283
+ --sample-dir sample/ade20k --condition-type seg --seed 0
284
+ ```
285
+ Calculate mIoU of the segmentation masks from the generated images:
286
+ ```sh
287
+ python evaluations/ade20k_mIoU.py
288
+ ```
289
+
290
+ #### 3. Text-to-image generation on COCOStuff
291
+
292
+ Download the DeepLabV3 [weights](https://download.openmmlab.com/mmsegmentation/v0.5/deeplabv3/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k/deeplabv3_r101-d8_512x512_4x4_320k_coco-stuff164k_20210709_155402-3cbca14d.pth) and save them to `evaluations/`.
293
+
294
+ Generate images using segmentation masks as condition controls:
295
+ ```bash
296
+ bash scripts/autoregressive/test_t2i.sh --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
297
+ --gpt-ckpt checkpoints/t2i/seg/seg_cocostuff.pt \
298
+ --code-path data/Captioned_COCOStuff/val --gpt-model GPT-XL --image-size 512 \
299
+ --sample-dir sample/cocostuff --condition-type seg --seed 0
300
+ ```
301
+ Calculate mIoU of the segmentation masks from the generated images:
302
+ ```bash
303
+ python evaluations/cocostuff_mIoU.py
304
+ ```
305
+
306
+ #### 4. Text-to-image generation on MultiGen-20M
307
+
308
+ We take **generation with HED edges** as an example.
309
+
310
+ Generate 5000 images based on the HED edges extracted from the validation images:
311
+ ```sh
312
+ bash scripts/autoregressive/test_t2i.sh --vq-ckpt checkpoints/vq/vq_ds16_t2i.pt \
313
+ --gpt-ckpt checkpoints/t2i/hed/hed.safetensors --code-path data/MultiGen20M/val \
314
+ --gpt-model GPT-XL --image-size 512 --sample-dir sample/multigen/hed \
315
+ --condition-type hed --seed 0
316
+ ```
317
+
318
+ Evaluate the conditional consistency (SSIM):
319
+ ```bash
320
+ python evaluations/hed_ssim.py
321
+ ```
322
+ Calculate the FID score:
323
+ ```bash
324
+ python evaluations/clean_fid.py --val-images data/MultiGen20M/val/image --generated-images sample/multigen/hed/visualization
325
+ ```
326
+
327
+ ### Training ControlAR
328
+
329
+ #### 1. Class-to-image (Canny)
330
+
331
+ ```bash
332
+ bash scripts/autoregressive/train_c2i_canny.sh --cloud-save-path output \
333
+ --code-path data/imagenet/train/imagenet_code_c2i_flip_ten_crop \
334
+ --image-size 256 --gpt-model GPT-B --gpt-ckpt checkpoints/llamagen/c2i_B_256.pt
335
+ ```
336
+
337
+ #### 2. Text-to-image (Canny)
338
+
339
+ ```bash
340
+ bash scripts/autoregressive/train_t2i_canny.sh
341
+ ```
342
+
343
+
344
+ ## Acknowledgments
345
+
346
+ The development of ControlAR is based on [LlamaGen](https://github.com/FoundationVision/LlamaGen), [ControlNet](https://github.com/lllyasviel/ControlNet), [ControlNet++](https://github.com/liming-ai/ControlNet_Plus_Plus), and [AiM](https://github.com/hp-l33/AiM), and we sincerely thank the contributors for these great works!
347
+
348
+ ## Citation
349
+ If you find ControlAR useful in your research or applications, please consider giving us a star 🌟 and citing it with the following BibTeX entry.
350
+
351
+ ```bibtex
352
+ @article{li2024controlar,
353
+ title={ControlAR: Controllable Image Generation with Autoregressive Models},
354
+ author={Zongming Li and Tianheng Cheng and Shoufa Chen and Peize Sun and Haocheng Shen and Longjin Ran and Xiaoxin Chen and Wenyu Liu and Xinggang Wang},
355
+ year={2024},
356
+ eprint={2410.02705},
357
+ archivePrefix={arXiv},
358
+ primaryClass={cs.CV},
359
+ url={https://arxiv.org/abs/2410.02705},
360
+ }
361
+ ```
362
+
assets/comparison.png ADDED

Git LFS Details

  • SHA256: 3cd19374f3544d16f9433e17eebb9135a9b93b5500486dac9d1032dfcd631980
  • Pointer size: 132 Bytes
  • Size of remote file: 3.9 MB
assets/logo.jpeg ADDED

Git LFS Details

  • SHA256: 742038b31c28245c9e000c51a9cf96df35ec28455c9cd25df7a6ffe5fffab033
  • Pointer size: 131 Bytes
  • Size of remote file: 254 kB
assets/vis.png ADDED

Git LFS Details

  • SHA256: a8604ada886ebe8860333e9ceddafc90a46daf587d01251f8a661801b4709a3c
  • Pointer size: 132 Bytes
  • Size of remote file: 3.11 MB
autoregressive/models/README.md ADDED
@@ -0,0 +1,7 @@
1
+ Download the ViT weights first:
2
+
3
+ ViT-small: https://huggingface.co/WinKawaks/vit-small-patch16-224 \
4
+ Dinov2-small: https://huggingface.co/facebook/dinov2-small \
5
+ Dinov2-base: https://huggingface.co/facebook/dinov2-base
6
+
7
+ Put them in this directory; `dinov2_adapter.py` loads the encoders from `autoregressive/models/dinov2-<size>` (see the download sketch below).
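+
+ A minimal download sketch with `huggingface_hub` (the target directories match what `dinov2_adapter.py` loads):
+
+ ```python
+ # Illustrative: fetch the DINOv2 encoders into autoregressive/models/.
+ from huggingface_hub import snapshot_download
+
+ for size in ("small", "base"):
+     snapshot_download(repo_id=f"facebook/dinov2-{size}",
+                       local_dir=f"autoregressive/models/dinov2-{size}")
+ ```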
autoregressive/models/dinov2_adapter.py ADDED
@@ -0,0 +1,36 @@
1
+ from transformers import AutoImageProcessor, AutoModel
2
+ from PIL import Image
3
+ import requests
4
+ import torch
5
+ import torch.nn as nn
6
+
7
+
8
+ class Dinov2_Adapter(nn.Module):
9
+ def __init__(self, input_dim=1, output_dim=768, attention=False, pool=False, nheads=8, dropout=0.1, adapter_size='small', condition_type='canny'):
10
+ super(Dinov2_Adapter, self).__init__()
11
+ print(f"Choose adapter size: {adapter_size}")
12
+ print(f"condition type: {condition_type}")
13
+ self.model = AutoModel.from_pretrained(f'autoregressive/models/dinov2-{adapter_size}')
14
+ self.condition_type = condition_type
15
+
16
+ def to_patch14(self, input):
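+ # DINOv2 uses 14x14 patches while the VQ tokenizer downsamples by 16, so resize H and W from multiples of 16 to multiples of 14; the control-token grid then matches the image-token grid.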
17
+ H, W = input.shape[2:]
18
+ new_H = (H // 16) * 14
19
+ new_W = (W // 16) * 14
20
+ if self.condition_type in ['canny', 'seg']:
21
+ output = torch.nn.functional.interpolate(input, size=(new_H, new_W), mode='nearest')  # canny, seg (align_corners is not applicable to 'nearest')
22
+ else:
23
+ output = torch.nn.functional.interpolate(input, size=(new_H, new_W), mode='bicubic', align_corners=True) # depth, lineart, hed
24
+ return output
25
+
26
+ def forward(self, x):
27
+ x = self.to_patch14(x)
28
+ x = self.model(x)
29
+ return x.last_hidden_state[:, 1:]
30
+
31
+
32
+ if __name__ == '__main__':
33
+ model = Dinov2_Adapter().cuda()
34
+ inputs = torch.randn(4,3,512,512).cuda()
35
+ outputs = model(inputs)
36
+ print(outputs.shape)
autoregressive/models/generate.py ADDED
@@ -0,0 +1,204 @@
1
+ # Modified from:
2
+ # gpt-fast: https://github.com/pytorch-labs/gpt-fast/blob/main/generate.py
3
+ # DiT: https://github.com/facebookresearch/DiT/blob/main/models.py
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch.nn import functional as F
7
+ import torch._dynamo.config
8
+ import torch._inductor.config
9
+ import copy
10
+ import time
11
+ # torch._inductor.config.coordinate_descent_tuning = True
12
+ # torch._inductor.config.triton.unique_kernel_names = True
13
+ # torch._inductor.config.fx_graph_cache = True # Experimental feature to reduce compilation times, will be on by default in future
14
+
15
+
16
+ ### from https://huggingface.co/transformers/v3.2.0/_modules/transformers/generation_utils.html
17
+ def top_k_top_p_filtering(
18
+ logits,
19
+ top_k: int = 0,
20
+ top_p: float = 1.0,
21
+ filter_value: float = -float("Inf"),
22
+ min_tokens_to_keep: int = 1,
23
+ ):
24
+ """Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
25
+ Args:
26
+ logits: logits distribution shape (batch size, vocabulary size)
27
+ if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
28
+ if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
29
+ Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
30
+ Make sure we keep at least min_tokens_to_keep per batch example in the output
31
+ From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
32
+ """
33
+ if top_k > 0:
34
+ # import pdb;pdb.set_trace()
35
+ top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
36
+ # Remove all tokens with a probability less than the last token of the top-k
37
+ indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
38
+ logits[indices_to_remove] = filter_value
39
+
40
+ if top_p < 1.0:
41
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True)
42
+ cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
43
+
44
+ # Remove tokens with cumulative probability above the threshold (token with 0 are kept)
45
+ sorted_indices_to_remove = cumulative_probs > top_p
46
+ if min_tokens_to_keep > 1:
47
+ # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
48
+ sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
49
+ # Shift the indices to the right to keep also the first token above the threshold
50
+ sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
51
+ sorted_indices_to_remove[..., 0] = 0
52
+
53
+ # scatter sorted tensors to original indexing
54
+ indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
55
+ logits[indices_to_remove] = filter_value
56
+ return logits
57
+
58
+
59
+ def sample(logits, temperature: float=1.0, top_k: int=2000, top_p: float=1.0, sample_logits=True):
60
+ logits = logits[:, -1, :] / max(temperature, 1e-5)
61
+ if top_k > 0 or top_p < 1.0:
62
+ logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
63
+ probs = F.softmax(logits, dim=-1)
64
+ # values, indices = torch.max(probs, dim=1, keepdim=True)
65
+ # mask = (probs == values).float()
66
+ # probs = probs * (1 - mask)
67
+ # values, indices = torch.max(probs, dim=1, keepdim=True)
68
+ # mask = (probs == values).float()
69
+ # probs = probs * (1 - mask)
70
+ if sample_logits:
71
+ idx = torch.multinomial(probs, num_samples=1)
72
+ else:
73
+ _, idx = torch.topk(probs, k=1, dim=-1)
74
+ return idx, probs
75
+
76
+
77
+ def logits_to_probs(logits, temperature: float = 1.0, top_p: float=1.0, top_k: int = None, **kwargs):
78
+ logits = logits / max(temperature, 1e-5)
79
+ if top_k > 0 or top_p < 1.0:
80
+ logits = top_k_top_p_filtering(logits, top_k=top_k, top_p=top_p)
81
+ probs = torch.nn.functional.softmax(logits, dim=-1)
82
+ return probs
83
+
84
+
85
+ def prefill(model, cond_idx: torch.Tensor, input_pos: torch.Tensor, cfg_scale: float, condition:torch.Tensor, control_strength: float=1, **sampling_kwargs):
86
+ if cfg_scale > 1.0:
87
+ logits, _ = model(None, cond_idx, input_pos, condition=condition, control_strength=control_strength)
88
+ logits_combined = logits
89
+ cond_logits, uncond_logits = torch.split(logits_combined, len(logits_combined) // 2, dim=0)
90
+ logits = uncond_logits + (cond_logits - uncond_logits) * cfg_scale
91
+ else:
92
+ logits, _ = model(None, cond_idx, input_pos, condition=condition)
93
+
94
+ return sample(logits, **sampling_kwargs)[0]
95
+
96
+
97
+ def decode_one_token(model, x: torch.Tensor, input_pos: torch.Tensor, cfg_scale: float, cfg_flag: bool, condition: torch.Tensor, **sampling_kwargs):
98
+ assert input_pos.shape[-1] == 1
99
+ if cfg_scale > 1.0:
100
+ x_combined = torch.cat([x, x])
101
+ logits, _ = model(x_combined, cond_idx=None, input_pos=input_pos, condition=condition)
102
+ logits_combined = logits
103
+ cond_logits, uncond_logits = torch.split(logits_combined, len(logits_combined) // 2, dim=0)
104
+ if cfg_flag:
105
+ logits = uncond_logits + (cond_logits - uncond_logits) * cfg_scale
106
+ else:
107
+ logits = cond_logits
108
+ else:
109
+ logits, _ = model(x, cond_idx=None, input_pos=input_pos, condition=None)
110
+ return sample(logits, **sampling_kwargs)
111
+
112
+
113
+ def decode_n_tokens(
114
+ model, cur_token: torch.Tensor, input_pos: torch.Tensor, num_new_tokens: int,
115
+ cfg_scale: float, cfg_interval: int, condition: torch.Tensor,
116
+ **sampling_kwargs):
117
+ new_tokens, new_probs = [], []
118
+ cfg_flag = True
119
+ for i in range(num_new_tokens):
120
+ with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True): # Actually better for Inductor to codegen attention here
121
+ if cfg_interval > -1 and i > cfg_interval:
122
+ cfg_flag = False
123
+ next_token, next_prob = decode_one_token(
124
+ model, cur_token, input_pos, cfg_scale, cfg_flag, condition=condition, **sampling_kwargs
125
+ )
126
+ input_pos += 1
127
+ new_tokens.append(next_token.clone())
128
+ new_probs.append(next_prob.clone())
129
+ cur_token = next_token.view(-1, 1)
130
+
131
+ return new_tokens, new_probs
132
+
133
+
134
+ @torch.no_grad()
135
+ def generate(model, cond, max_new_tokens, emb_masks=None, cfg_scale=1.0, cfg_interval=-1, condition=None, condition_null=None, condition_token_nums=0, control_strength=1, **sampling_kwargs):
136
+ if condition is not None:
137
+ condition = model.adapter(condition)
138
+ condition = model.adapter_mlp(condition)
139
+ if model.model_type == 'c2i':
140
+ if cfg_scale > 1.0:
141
+ cond_null = torch.ones_like(cond) * model.num_classes
142
+ cond_combined = torch.cat([cond, cond_null])
143
+ if condition is not None:
144
+ condition_null = torch.zeros_like(condition)
145
+ condition_combined = torch.cat((condition, condition_null), dim=0)
146
+ else:
147
+ condition_combined = None
148
+ else:
149
+ cond_combined = cond
150
+ if condition is not None:
151
+ condition_combined = condition
152
+ else:
153
+ condition_combined = None
154
+ T = 1+condition_token_nums
155
+ elif model.model_type == 't2i':
156
+ if cfg_scale > 1.0:
157
+ cond_null = torch.zeros_like(cond) + model.cls_embedding.uncond_embedding
158
+ cond_combined = torch.cat([cond, cond_null])
159
+
160
+ if condition is not None:
161
+ condition_null = torch.zeros_like(condition)
162
+ condition_combined = torch.cat((condition, condition_null), dim=0)
163
+ else:
164
+ condition_combined = None
165
+ else:
166
+ cond_combined = cond
167
+ if condition is not None:
168
+ condition_combined = condition
169
+ else:
170
+ condition_combined = None
171
+ T = cond.shape[1]
172
+ else:
173
+ raise Exception("please check model type")
174
+
175
+ T_new = T + max_new_tokens
176
+ max_seq_length = T_new
177
+ max_batch_size = cond.shape[0]
178
+
179
+ device = cond.device
180
+ with torch.device(device):
181
+ max_batch_size_cfg = max_batch_size * 2 if cfg_scale > 1.0 else max_batch_size
182
+ model.setup_caches(max_batch_size=max_batch_size_cfg, max_seq_length=max_seq_length, dtype=model.tok_embeddings.weight.dtype)
183
+
184
+ if emb_masks is not None:
185
+ assert emb_masks.shape[0] == max_batch_size
186
+ assert emb_masks.shape[-1] == T
187
+ if cfg_scale > 1.0:
188
+ model.causal_mask[:, :, :T] = model.causal_mask[:, :, :T] * torch.cat([emb_masks, emb_masks]).unsqueeze(1)
189
+ else:
190
+ model.causal_mask[:, :, :T] = model.causal_mask[:, :, :T] * emb_masks.unsqueeze(1)
191
+
192
+ eye_matrix = torch.eye(model.causal_mask.size(1), model.causal_mask.size(2), device=device)
193
+ model.causal_mask[:] = model.causal_mask * (1 - eye_matrix) + eye_matrix
194
+
195
+ # create an empty tensor of the expected final shape and fill in the current tokens
196
+ seq = torch.empty((max_batch_size, T_new), dtype=torch.int, device=device)
197
+ input_pos = torch.arange(0, T, device=device)
198
+ next_token = prefill(model, cond_combined, input_pos, cfg_scale, condition_combined, control_strength, **sampling_kwargs)
199
+ seq[:, T:T+1] = next_token
200
+
201
+ input_pos = torch.tensor([T], device=device, dtype=torch.int)
202
+ generated_tokens, _ = decode_n_tokens(model, next_token, input_pos, max_new_tokens-1, cfg_scale, cfg_interval, condition=condition_combined, **sampling_kwargs)
203
+ seq[:, T+1:] = torch.cat(generated_tokens, dim=1)
204
+ return seq[:, T:]
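+
+
+ if __name__ == '__main__':
+     # Illustrative sanity check (not part of the original file): with top_k=2,
+     # all but the two largest logits should be masked to -inf.
+     toy_logits = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
+     filtered = top_k_top_p_filtering(toy_logits.clone(), top_k=2, top_p=1.0)
+     print(filtered)  # expected: tensor([[-inf, -inf, 3., 4.]])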
autoregressive/models/gpt.py ADDED
@@ -0,0 +1,550 @@
1
+ # Modified from:
2
+ # VQGAN: https://github.com/CompVis/taming-transformers/blob/master/taming/modules/transformer/mingpt.py
3
+ # DiT: https://github.com/facebookresearch/DiT/blob/main/models.py
4
+ # nanoGPT: https://github.com/karpathy/nanoGPT/blob/master/model.py
5
+ # llama: https://github.com/facebookresearch/llama/blob/main/llama/model.py
6
+ # gpt-fast: https://github.com/pytorch-labs/gpt-fast/blob/main/model.py
7
+ # PixArt: https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py
8
+ from dataclasses import dataclass
9
+ from typing import Optional, List
10
+
11
+ import io
12
+ import torch
13
+ import torch.nn as nn
14
+ from torch.nn import functional as F
15
+ from utils.drop_path import DropPath
16
+ from autoregressive.models.dinov2_adapter import Dinov2_Adapter
17
+ from autoregressive.models.vit_adapter import ViT_Adapter
18
+
19
+ def get_causal_mask(seq_length):
+ # Additive attention mask: 0 on and below the diagonal, -inf above it.
+ causal = torch.triu(torch.ones(seq_length, seq_length), diagonal=1).type(torch.bool)
+ mask = torch.zeros(seq_length, seq_length)
+ mask = mask.masked_fill(causal, float('-inf'))
+ return mask
24
+
25
+ def find_multiple(n: int, k: int):
26
+ if n % k == 0:
27
+ return n
28
+ return n + k - (n % k)
29
+
30
+ @dataclass
31
+ class ModelArgs:
32
+ dim: int = 4096
33
+ n_layer: int = 32
34
+ n_head: int = 32
35
+ n_kv_head: Optional[int] = None
36
+ multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2
37
+ ffn_dim_multiplier: Optional[float] = None
38
+ rope_base: float = 10000
39
+ norm_eps: float = 1e-5
40
+ initializer_range: float = 0.02
41
+
42
+ token_dropout_p: float = 0.1
43
+ attn_dropout_p: float = 0.0
44
+ resid_dropout_p: float = 0.1
45
+ ffn_dropout_p: float = 0.1
46
+ drop_path_rate: float = 0.0
47
+
48
+ num_classes: int = 1000
49
+ caption_dim: int = 2048
50
+ class_dropout_prob: float = 0.1
51
+ model_type: str = 'c2i'
52
+
53
+ vocab_size: int = 16384
54
+ cls_token_num: int = 1
55
+ block_size: int = 256
56
+ max_batch_size: int = 32
57
+ max_seq_len: int = 2048
58
+
59
+ condition_token_num: int = 256
60
+ image_size: int = 256
61
+
62
+
63
+ #################################################################################
64
+ # Embedding Layers for Class Labels #
65
+ #################################################################################
66
+ class LabelEmbedder(nn.Module):
67
+ """
68
+ Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.
69
+ """
70
+ def __init__(self, num_classes, hidden_size, dropout_prob):
71
+ super().__init__()
72
+ use_cfg_embedding = dropout_prob > 0
73
+ self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size)
74
+ self.num_classes = num_classes
75
+ self.dropout_prob = dropout_prob
76
+
77
+ def token_drop(self, labels, force_drop_ids=None):
78
+ """
79
+ Drops labels to enable classifier-free guidance.
80
+ """
81
+ if force_drop_ids is None:
82
+ drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob
83
+ else:
84
+ drop_ids = force_drop_ids == 1
85
+ labels = torch.where(drop_ids, self.num_classes, labels)
86
+ return labels, drop_ids
87
+
88
+ def forward(self, labels, train, force_drop_ids=None):
89
+ use_dropout = self.dropout_prob > 0
90
+ if (train and use_dropout) or (force_drop_ids is not None):
91
+ labels,drop_ids = self.token_drop(labels, force_drop_ids)
92
+ embeddings = self.embedding_table(labels).unsqueeze(1)
93
+ if (train and use_dropout) or (force_drop_ids is not None):
94
+ return embeddings,drop_ids
95
+ else:
96
+ return embeddings
97
+
98
+
99
+ class ConditionEmbedder(nn.Module):
100
+ """
101
+ Embeds Condition into vector representations. Also handles label dropout for classifier-free guidance.
102
+ """
103
+ def __init__(self, in_channels, hidden_size, uncond_prob, token_num=120, vocab_size=16384):
104
+ super().__init__()
105
+ self.cap_proj = MLP(in_features=hidden_size, hidden_features=hidden_size, out_features=hidden_size)
106
+ self.register_buffer("uncond_embedding", torch.zeros(token_num, hidden_size) / hidden_size ** 0.5)
107
+ self.uncond_prob = uncond_prob
108
+
109
+ def token_drop(self, caption, force_drop_ids=None, drop_ids=None):
110
+ """
111
+ Drops labels to enable classifier-free guidance.
112
+ """
113
+ if force_drop_ids is None:
114
+ if drop_ids is None:
115
+ drop_ids = torch.rand(caption.shape[0], device=caption.device) < self.uncond_prob
116
+ else:
117
+ drop_ids = force_drop_ids == 1
118
+ uncond_embedding = torch.zeros_like(caption[0])
119
+ caption = torch.where(drop_ids[:, None, None], uncond_embedding, caption)
120
+ return caption
121
+
122
+ def forward(self, caption, train, force_drop_ids=None, drop_ids=None):
123
+ use_dropout = self.uncond_prob > 0
124
+ if (train and use_dropout) or (force_drop_ids is not None):
125
+ caption = self.token_drop(caption, force_drop_ids, drop_ids)
126
+ embeddings = self.cap_proj(caption)
127
+ return embeddings
128
+
129
+ #################################################################################
130
+ # Embedding Layers for Text Feature #
131
+ #################################################################################
132
+ class CaptionEmbedder(nn.Module):
133
+ """
134
+ Embeds text caption into vector representations. Also handles label dropout for classifier-free guidance.
135
+ """
136
+ def __init__(self, in_channels, hidden_size, uncond_prob, token_num=120):
137
+ super().__init__()
138
+ self.cap_proj = MLP(in_features=in_channels, hidden_features=hidden_size, out_features=hidden_size)
139
+ self.register_buffer("uncond_embedding", nn.Parameter(torch.randn(token_num, in_channels) / in_channels ** 0.5))
140
+ self.uncond_prob = uncond_prob
141
+
142
+ def token_drop(self, caption, force_drop_ids=None):
143
+ """
144
+ Drops labels to enable classifier-free guidance.
145
+ """
146
+ if force_drop_ids is None:
147
+ drop_ids = torch.rand(caption.shape[0], device=caption.device) < self.uncond_prob
148
+ else:
149
+ drop_ids = force_drop_ids == 1
150
+ caption = torch.where(drop_ids[:, None, None], self.uncond_embedding, caption)
151
+ return caption
152
+
153
+ def forward(self, caption, train, force_drop_ids=None):
154
+ use_dropout = self.uncond_prob > 0
155
+ if (train and use_dropout) or (force_drop_ids is not None):
156
+ caption = self.token_drop(caption, force_drop_ids)
157
+ embeddings = self.cap_proj(caption)
158
+ return embeddings
159
+
160
+
161
+ class MLP(nn.Module):
162
+ def __init__(self, in_features, hidden_features, out_features):
163
+ super().__init__()
164
+ out_features = out_features or in_features
165
+ hidden_features = hidden_features or in_features
166
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=False)
167
+ self.act = nn.GELU(approximate='tanh')
168
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=False)
169
+
170
+ nn.init.zeros_(self.fc1.weight)
171
+ nn.init.zeros_(self.fc2.weight)
172
+
173
+ def forward(self, x):
174
+ x = self.fc1(x)
175
+ x = self.act(x)
176
+ x = self.fc2(x)
177
+ return x
178
+
179
+
180
+ #################################################################################
181
+ # GPT Model #
182
+ #################################################################################
183
+ class RMSNorm(torch.nn.Module):
184
+ def __init__(self, dim: int, eps: float = 1e-5):
185
+ super().__init__()
186
+ self.eps = eps
187
+ self.weight = nn.Parameter(torch.ones(dim))
188
+
189
+ def _norm(self, x):
190
+ return x * torch.rsqrt(torch.mean(x * x, dim=-1, keepdim=True) + self.eps)
191
+
192
+ def forward(self, x):
193
+ output = self._norm(x.float()).type_as(x)
194
+ return output * self.weight
195
+
196
+
197
+ class FeedForward(nn.Module):
198
+ def __init__(self, config: ModelArgs):
199
+ super().__init__()
200
+ hidden_dim = 4 * config.dim
201
+ hidden_dim = int(2 * hidden_dim / 3)
202
+ # custom dim factor multiplier
203
+ if config.ffn_dim_multiplier is not None:
204
+ hidden_dim = int(config.ffn_dim_multiplier * hidden_dim)
205
+ hidden_dim = find_multiple(hidden_dim, config.multiple_of)
206
+
207
+ self.w1 = nn.Linear(config.dim, hidden_dim, bias=False)
208
+ self.w3 = nn.Linear(config.dim, hidden_dim, bias=False)
209
+ self.w2 = nn.Linear(hidden_dim, config.dim, bias=False)
210
+ self.ffn_dropout = nn.Dropout(config.ffn_dropout_p)
211
+
212
+ def forward(self, x):
213
+ return self.ffn_dropout(self.w2(F.silu(self.w1(x)) * self.w3(x)))
214
+
215
+
216
+ class KVCache(nn.Module):
217
+ def __init__(self, max_batch_size, max_seq_length, n_head, head_dim, dtype):
218
+ super().__init__()
219
+ cache_shape = (max_batch_size, n_head, max_seq_length, head_dim)
220
+ self.register_buffer('k_cache', torch.zeros(cache_shape, dtype=dtype))
221
+ self.register_buffer('v_cache', torch.zeros(cache_shape, dtype=dtype))
222
+
223
+ def update(self, input_pos, k_val, v_val):
224
+ # input_pos: [S], k_val: [B, H, S, D]
225
+ assert input_pos.shape[0] == k_val.shape[2]
226
+ k_out = self.k_cache
227
+ v_out = self.v_cache
228
+ k_out[:, :, input_pos] = k_val
229
+ v_out[:, :, input_pos] = v_val
230
+
231
+ return k_out, v_out
232
+
233
+
234
+ class Attention(nn.Module):
235
+ def __init__(self, config: ModelArgs):
236
+ super().__init__()
237
+ assert config.dim % config.n_head == 0
238
+ self.dim = config.dim
239
+ self.head_dim = config.dim // config.n_head
240
+ self.n_head = config.n_head
241
+ self.n_kv_head = config.n_kv_head if config.n_kv_head is not None else config.n_head
242
+ total_kv_dim = (self.n_head + 2 * self.n_kv_head) * self.head_dim
243
+
244
+ # key, query, value projections for all heads, but in a batch
245
+ self.wqkv = nn.Linear(config.dim, total_kv_dim, bias=False)
246
+ self.wo = nn.Linear(config.dim, config.dim, bias=False)
247
+ self.kv_cache = None
248
+
249
+ # regularization
250
+ self.attn_dropout_p = config.attn_dropout_p
251
+ self.resid_dropout = nn.Dropout(config.resid_dropout_p)
252
+
253
+ def forward(
254
+ self, x: torch.Tensor, freqs_cis: torch.Tensor = None,
255
+ input_pos: Optional[torch.Tensor] = None,
256
+ mask: Optional[torch.Tensor] = None
257
+ ):
258
+ bsz, seqlen, _ = x.shape
259
+ kv_size = self.n_kv_head * self.head_dim
260
+ xq, xk, xv = self.wqkv(x).split([self.dim, kv_size, kv_size], dim=-1)
261
+
262
+ xq = xq.view(bsz, seqlen, self.n_head, self.head_dim)
263
+ xk = xk.view(bsz, seqlen, self.n_kv_head, self.head_dim)
264
+ xv = xv.view(bsz, seqlen, self.n_kv_head, self.head_dim)
265
+
266
+ xq = apply_rotary_emb(xq, freqs_cis)
267
+ xk = apply_rotary_emb(xk, freqs_cis)
268
+
269
+ xq, xk, xv = map(lambda x: x.transpose(1, 2), (xq, xk, xv))
270
+
271
+ if self.kv_cache is not None:
272
+ keys, values = self.kv_cache.update(input_pos, xk, xv)
273
+ else:
274
+ keys, values = xk, xv
275
+ keys = keys.repeat_interleave(self.n_head // self.n_kv_head, dim=1)
276
+ values = values.repeat_interleave(self.n_head // self.n_kv_head, dim=1)
277
+
278
+ output = F.scaled_dot_product_attention(
279
+ xq, keys, values,
280
+ attn_mask=mask,
281
+ is_causal=True if mask is None else False, # is_causal=False is for KV cache
282
+ dropout_p=self.attn_dropout_p if self.training else 0)
283
+
284
+ output = output.transpose(1, 2).contiguous().view(bsz, seqlen, self.dim)
285
+
286
+ output = self.resid_dropout(self.wo(output))
287
+ return output
288
+
289
+
290
+ class TransformerBlock(nn.Module):
291
+ def __init__(self, config: ModelArgs, drop_path: float):
292
+ super().__init__()
293
+ self.attention = Attention(config)
294
+ self.feed_forward = FeedForward(config)
295
+ self.attention_norm = RMSNorm(config.dim, eps=config.norm_eps)
296
+ self.ffn_norm = RMSNorm(config.dim, eps=config.norm_eps)
297
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
298
+
299
+ def forward(
300
+ self, x: torch.Tensor, freqs_cis: torch.Tensor, start_pos: int, mask: Optional[torch.Tensor] = None):
301
+ h = x + self.drop_path(self.attention(self.attention_norm(x), freqs_cis, start_pos, mask))
302
+ out = h + self.drop_path(self.feed_forward(self.ffn_norm(h)))
303
+ return out
304
+
305
+
306
+ class Transformer(nn.Module):
307
+ def __init__(self, config: ModelArgs):
308
+ super().__init__()
309
+ self.config = config
310
+ self.vocab_size = config.vocab_size
311
+ self.n_layer = config.n_layer
312
+ self.block_size = config.block_size
313
+ self.num_classes = config.num_classes
314
+ self.model_type = config.model_type
315
+ self.cls_token_num = config.cls_token_num
316
+ self.condition_token_num = config.condition_token_num
317
+ self.layer_internal = config.n_layer // 3
318
+ # self.adapter = Adapter(output_dim=config.dim)
319
+ self.adapter = ViT_Adapter()
320
+ # self.adapter = Deit_Adapter()
321
+ # self.adapter = EVA_Adapter(img_size=256, in_chans=3, embed_dim=384)
322
+ # self.adapter = Dinov2_Adapter(adapter_size='base')
323
+ # self.adapter = EVA_Adapter()
324
+ self.adapter_mlp = MLP(384, config.dim, config.dim)
325
+ # self.adapter_mlp = MLP(768, config.dim, config.dim)
326
+ # self.cross_attention = nn.MultiheadAttention(embed_dim=config.dim, num_heads=8,batch_first=True)
327
+ if self.model_type == 'c2i':
328
+ self.cls_embedding = LabelEmbedder(config.num_classes, config.dim, config.class_dropout_prob)
329
+ elif self.model_type == 't2i':
330
+ self.cls_embedding = CaptionEmbedder(config.caption_dim, config.dim, config.class_dropout_prob)
331
+ else:
332
+ raise Exception("please check model type")
333
+ self.tok_embeddings = nn.Embedding(config.vocab_size, config.dim)
334
+ self.tok_dropout = nn.Dropout(config.token_dropout_p)
335
+
336
+ self.condition_embeddings = nn.Embedding(config.vocab_size, config.dim)
337
+ self.condition_mlp = ConditionEmbedder((config.image_size // 16)**2, config.dim, config.class_dropout_prob, (config.image_size // 16)**2, config.vocab_size)
338
+
339
+ self.condition_layers = torch.nn.ModuleList()
340
+ for layer_id in range(3):
341
+ self.condition_layers.append(MLP(config.dim,config.dim,config.dim))
342
+
343
+ # transformer blocks
344
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.n_layer)]
345
+ self.layers = torch.nn.ModuleList()
346
+ for layer_id in range(config.n_layer):
347
+ self.layers.append(TransformerBlock(config, dpr[layer_id]))
348
+
349
+ # output layer
350
+ self.norm = RMSNorm(config.dim, eps=config.norm_eps)
351
+ self.condition_norm = RMSNorm(config.dim, eps=config.norm_eps)
352
+ self.output = nn.Linear(config.dim, config.vocab_size, bias=False)
353
+
354
+ # 2d rotary pos embedding
355
+ grid_size = int(self.block_size ** 0.5)
356
+ assert grid_size * grid_size == self.block_size
357
+ self.freqs_cis = precompute_freqs_cis_2d(grid_size, self.config.dim // self.config.n_head, self.config.rope_base, self.cls_token_num+self.condition_token_num)
358
+
359
+ # KVCache
360
+ self.max_batch_size = -1
361
+ self.max_seq_length = -1
362
+
363
+ self.initialize_weights()
364
+ self.condition_token = None
365
+ self.global_token = None
366
+ self.mask = get_causal_mask(256)
367
+
368
+ def initialize_weights(self):
369
+ # Initialize nn.Linear and nn.Embedding
370
+ self.apply(self._init_weights)
371
+
372
+
373
+ def _init_weights(self, module):
374
+ std = self.config.initializer_range
375
+ if isinstance(module, nn.Linear):
376
+ module.weight.data.normal_(mean=0.0, std=std)
377
+ if module.bias is not None:
378
+ module.bias.data.zero_()
379
+ elif isinstance(module, nn.Embedding):
380
+ module.weight.data.normal_(mean=0.0, std=std)
381
+
382
+ def setup_caches(self, max_batch_size, max_seq_length, dtype):
383
+ # if self.max_seq_length >= max_seq_length and self.max_batch_size >= max_batch_size:
384
+ # return
385
+ head_dim = self.config.dim // self.config.n_head
386
+ max_seq_length = find_multiple(max_seq_length, 8) #
387
+ self.max_seq_length = max_seq_length
388
+ self.max_batch_size = max_batch_size
389
+ for b in self.layers:
390
+ b.attention.kv_cache = KVCache(max_batch_size, max_seq_length, self.config.n_head, head_dim, dtype)
391
+
392
+ causal_mask = torch.tril(torch.ones(self.max_seq_length, self.max_seq_length, dtype=torch.bool))
393
+ self.causal_mask = causal_mask.unsqueeze(0).repeat(self.max_batch_size, 1, 1)
394
+ grid_size = int(self.config.block_size ** 0.5)
395
+ assert grid_size * grid_size == self.block_size
396
+ self.freqs_cis = precompute_freqs_cis_2d(grid_size, self.config.dim // self.config.n_head, self.config.rope_base, self.cls_token_num+self.condition_token_num)
397
+
398
+
399
+
400
+ def forward(
401
+ self,
402
+ idx: torch.Tensor,
403
+ cond_idx: torch.Tensor, # cond_idx_or_embed
404
+ input_pos: Optional[torch.Tensor] = None,
405
+ targets: Optional[torch.Tensor] = None,
406
+ mask: Optional[torch.Tensor] = None,
407
+ valid: Optional[torch.Tensor] = None,
408
+ condition: Optional[torch.Tensor] = None
409
+ ):
410
+ if idx is not None and cond_idx is not None: # training or naive inference
411
+ cond_embeddings, drop_ids = self.cls_embedding(cond_idx, train=self.training)  # text-condition embedding
412
+ cond_embeddings = cond_embeddings[:,:self.cls_token_num]
413
+ token_embeddings = self.tok_embeddings(idx)  # embeddings of the original image tokens
414
+ if condition is not None:
415
+ condition_embeddings = self.adapter(condition)  # embedding of the control image (depth map / edge map)
416
+ condition_embeddings = self.adapter_mlp(condition_embeddings)
417
+
418
+ self.condition_token = self.condition_mlp(condition_embeddings,train=self.training, drop_ids=drop_ids)
419
+ token_embeddings = torch.cat((cond_embeddings, token_embeddings), dim=1)
420
+ h = self.tok_dropout(token_embeddings)
421
+ self.freqs_cis = self.freqs_cis.to(h.device)
422
+ else:
423
+ if cond_idx is not None: # prefill in inference
424
+ token_embeddings = self.cls_embedding(cond_idx, train=self.training)
425
+ token_embeddings = token_embeddings[:,:self.cls_token_num]
426
+ if condition is not None:
427
+ condition_embeddings = self.condition_mlp(condition.to(torch.bfloat16),train=self.training)
428
+ self.condition_token = condition_embeddings
429
+ else: # decode_n_tokens(kv cache) in inference
430
+ token_embeddings = self.tok_embeddings(idx)
431
+ bs = token_embeddings.shape[0]
432
+ mask = self.causal_mask[:bs, None, input_pos]
433
+ h = self.tok_dropout(token_embeddings)
434
+ self.freqs_cis = self.freqs_cis
435
+ if self.training:
436
+ freqs_cis = self.freqs_cis[:token_embeddings.shape[1]]
437
+ else:
438
+ freqs_cis = self.freqs_cis[input_pos]
439
+ # transformer blocks
440
+ for i, layer in enumerate(self.layers):
441
+ if i % self.layer_internal == 0:  # inject the condition information every layer_internal layers (with layer_internal=10 this happens at layers 0, 10, 20, 30)
442
+ if self.training:# h = torch.cat((cond_embeddings, token_embeddings), dim=1)
443
+ h = h + self.condition_layers[i//self.layer_internal](self.condition_token)
444
+ else:
445
+ if len(input_pos)>1:
446
+ h[:,-1:] = h[:,-1:] + self.condition_layers[i//self.layer_internal](self.condition_token[:,0:1])
447
+ else:
448
+ h = h + self.condition_layers[i//self.layer_internal](self.condition_token[:,input_pos])
449
+ h = layer(h, freqs_cis, input_pos, mask)
450
+ # output layers
451
+ h = self.norm(h)
452
+ logits = self.output(h).float()
453
+
454
+ if self.training:
455
+ logits = logits[:, self.cls_token_num+self.condition_token_num - 1:].contiguous()
456
+ # if we are given some desired targets also calculate the loss
457
+ loss = None
458
+ if valid is not None:
459
+ loss_all = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), reduction='none')
460
+ valid_all = valid[:,None].repeat(1, targets.shape[1]).view(-1)
461
+ loss = (loss_all * valid_all).sum() / max(valid_all.sum(), 1)
462
+ elif targets is not None:
463
+ loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
464
+
465
+ return logits, loss
466
+
467
+
468
+ def get_fsdp_wrap_module_list(self) -> List[nn.Module]:
469
+ return list(self.layers)
470
+
471
+
472
+
473
+ #################################################################################
474
+ # Rotary Positional Embedding Functions #
475
+ #################################################################################
476
+ # https://github.com/pytorch-labs/gpt-fast/blob/main/model.py
477
+ def precompute_freqs_cis(seq_len: int, n_elem: int, base: int = 10000, cls_token_num=120):
478
+ freqs = 1.0 / (base ** (torch.arange(0, n_elem, 2)[: (n_elem // 2)].float() / n_elem))
479
+ t = torch.arange(seq_len, device=freqs.device)
480
+ freqs = torch.outer(t, freqs) # (seq_len, head_dim // 2)
481
+ freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
482
+ cache = torch.stack([freqs_cis.real, freqs_cis.imag], dim=-1) # (cls_token_num+seq_len, head_dim // 2, 2)
483
+ cond_cache = torch.cat([torch.zeros(cls_token_num, n_elem // 2, 2), cache]) # (cls_token_num+seq_len, head_dim // 2, 2)
484
+ return cond_cache
485
+
486
+
487
+ def precompute_freqs_cis_2d(grid_size: int, n_elem: int, base: int = 10000, cls_token_num=120):
488
+ # split the dimension into half, one for x and one for y
489
+ half_dim = n_elem // 2
490
+ freqs = 1.0 / (base ** (torch.arange(0, half_dim, 2)[: (half_dim // 2)].float() / half_dim))
491
+ t = torch.arange(grid_size, device=freqs.device)
492
+ freqs = torch.outer(t, freqs) # (grid_size, head_dim // 2)
493
+ freqs_grid = torch.concat([
494
+ freqs[:, None, :].expand(-1, grid_size, -1),
495
+ freqs[None, :, :].expand(grid_size, -1, -1),
496
+ ], dim=-1) # (grid_size, grid_size, head_dim // 2)
497
+ cache_grid = torch.stack([torch.cos(freqs_grid), torch.sin(freqs_grid)], dim=-1) # (grid_size, grid_size, head_dim // 2, 2)
498
+ cache = cache_grid.flatten(0, 1)
499
+ cond_cache = torch.cat([torch.zeros(cls_token_num, n_elem // 2, 2), cache]) # (cls_token_num+grid_size**2, head_dim // 2, 2)
500
+ return cond_cache
501
+
502
+
503
+ def apply_rotary_emb(x: torch.Tensor, freqs_cis: torch.Tensor):
504
+ # x: (bs, seq_len, n_head, head_dim)
505
+ # freqs_cis (seq_len, head_dim // 2, 2)
506
+ xshaped = x.float().reshape(*x.shape[:-1], -1, 2) # (bs, seq_len, n_head, head_dim//2, 2)
507
+ freqs_cis = freqs_cis.view(1, xshaped.size(1), 1, xshaped.size(3), 2) # (1, seq_len, 1, head_dim//2, 2)
508
+ x_out2 = torch.stack([
509
+ xshaped[..., 0] * freqs_cis[..., 0] - xshaped[..., 1] * freqs_cis[..., 1],
510
+ xshaped[..., 1] * freqs_cis[..., 0] + xshaped[..., 0] * freqs_cis[..., 1],
511
+ ], dim=-1)
512
+ x_out2 = x_out2.flatten(3)
513
+ return x_out2.type_as(x)
514
+
515
+
516
+
517
+ #################################################################################
518
+ # GPT Configs #
519
+ #################################################################################
520
+ ### text-conditional
521
+ def GPT_7B(**kwargs):
522
+ return Transformer(ModelArgs(n_layer=32, n_head=32, dim=4096, **kwargs)) # 6.6B
523
+
524
+ def GPT_3B(**kwargs):
525
+ return Transformer(ModelArgs(n_layer=24, n_head=32, dim=3200, **kwargs)) # 3.1B
526
+
527
+ def GPT_1B(**kwargs):
528
+ return Transformer(ModelArgs(n_layer=22, n_head=32, dim=2048, **kwargs)) # 1.2B
529
+
530
+ ### class-conditional
531
+ def GPT_XXXL(**kwargs):
532
+ return Transformer(ModelArgs(n_layer=48, n_head=40, dim=2560, **kwargs)) # 3.9B
533
+
534
+ def GPT_XXL(**kwargs):
535
+ return Transformer(ModelArgs(n_layer=48, n_head=24, dim=1536, **kwargs)) # 1.4B
536
+
537
+ def GPT_XL(**kwargs):
538
+ return Transformer(ModelArgs(n_layer=36, n_head=20, dim=1280, **kwargs)) # 775M
539
+
540
+ def GPT_L(**kwargs):
541
+ return Transformer(ModelArgs(n_layer=24, n_head=16, dim=1024, **kwargs)) # 343M
542
+
543
+ def GPT_B(**kwargs):
544
+ return Transformer(ModelArgs(n_layer=12, n_head=12, dim=768, **kwargs)) # 111M
545
+
546
+
547
+ GPT_models = {
548
+ 'GPT-B': GPT_B, 'GPT-L': GPT_L, 'GPT-XL': GPT_XL, 'GPT-XXL': GPT_XXL, 'GPT-XXXL': GPT_XXXL,
549
+ 'GPT-1B': GPT_1B, 'GPT-3B': GPT_3B, 'GPT-7B': GPT_7B,
550
+ }
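For readers skimming the diff: the forward pass above adds the adapter-produced condition tokens back into the hidden states every `n_layer // 3` layers through a small per-stage MLP (`condition_layers`). The sketch below is a minimal, self-contained illustration of that injection schedule only; the sizes, the `nn.Linear` stand-ins, and the omitted transformer blocks are assumptions for the demo, not the repo's modules.

```python
import torch
import torch.nn as nn

n_layer, dim, seq_len, bsz = 12, 768, 256, 2
layer_internal = n_layer // 3                        # -> 4: inject at layers 0, 4, 8
condition_layers = nn.ModuleList([nn.Linear(dim, dim) for _ in range(3)])  # stand-in for the per-stage MLPs

h = torch.randn(bsz, seq_len, dim)                   # hidden states entering the blocks
condition_token = torch.randn(bsz, seq_len, dim)     # adapter output after adapter_mlp

injected_at = []
for i in range(n_layer):
    if i % layer_internal == 0:
        # residual injection of the (projected) condition tokens
        h = h + condition_layers[i // layer_internal](condition_token)
        injected_at.append(i)
    # ... the TransformerBlock for layer i would run here ...

print(injected_at)  # [0, 4, 8]
```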
autoregressive/models/gpt_t2i.py ADDED
@@ -0,0 +1,569 @@
1
+ # Modified from:
2
+ # VQGAN: https://github.com/CompVis/taming-transformers/blob/master/taming/modules/transformer/mingpt.py
3
+ # DiT: https://github.com/facebookresearch/DiT/blob/main/models.py
4
+ # nanoGPT: https://github.com/karpathy/nanoGPT/blob/master/model.py
5
+ # llama: https://github.com/facebookresearch/llama/blob/main/llama/model.py
6
+ # gpt-fast: https://github.com/pytorch-labs/gpt-fast/blob/main/model.py
7
+ # PixArt: https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py
8
+ from dataclasses import dataclass
9
+ from typing import Optional, List
10
+
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ from torch.nn import functional as F
15
+ from utils.drop_path import DropPath
16
+ from autoregressive.models.vit_adapter import ViT_Adapter
17
+ from autoregressive.models.dinov2_adapter import Dinov2_Adapter
18
+
19
+
20
+ def get_causal_mask(seq_length):
21
+ mask = torch.triu(torch.ones(seq_length, seq_length), diagonal=1).type(torch.bool)
22
+ mask = mask.masked_fill(mask, float('-inf'))
23
+ mask = mask.masked_fill(~mask, float(0.0))
24
+ return mask
25
+
26
+ def find_multiple(n: int, k: int):
27
+ if n % k == 0:
28
+ return n
29
+ return n + k - (n % k)
30
+
31
+ @dataclass
32
+ class ModelArgs:
33
+ dim: int = 4096
34
+ n_layer: int = 32
35
+ n_head: int = 32
36
+ n_kv_head: Optional[int] = None
37
+ multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2
38
+ ffn_dim_multiplier: Optional[float] = None
39
+ rope_base: float = 10000
40
+ norm_eps: float = 1e-5
41
+ initializer_range: float = 0.02
42
+
43
+ token_dropout_p: float = 0.1
44
+ attn_dropout_p: float = 0.0
45
+ resid_dropout_p: float = 0.1
46
+ ffn_dropout_p: float = 0.1
47
+ drop_path_rate: float = 0.0
48
+
49
+ num_classes: int = 1000
50
+ caption_dim: int = 2048
51
+ class_dropout_prob: float = 0.1
52
+ model_type: str = 'c2i'
53
+
54
+ vocab_size: int = 16384
55
+ cls_token_num: int = 1
56
+ block_size: int = 256
57
+ max_batch_size: int = 32
58
+ max_seq_len: int = 2048
59
+ adapter_size: str = 'small'
60
+ condition_type: str = 'canny'
61
+
62
+
63
+
64
+ #################################################################################
65
+ # Embedding Layers for Class Labels #
66
+ #################################################################################
67
+ class LabelEmbedder(nn.Module):
68
+ """
69
+ Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.
70
+ """
71
+ def __init__(self, num_classes, hidden_size, dropout_prob):
72
+ super().__init__()
73
+ use_cfg_embedding = dropout_prob > 0
74
+ self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size)
75
+ self.num_classes = num_classes
76
+ self.dropout_prob = dropout_prob
77
+
78
+ def token_drop(self, labels, force_drop_ids=None):
79
+ """
80
+ Drops labels to enable classifier-free guidance.
81
+ """
82
+ if force_drop_ids is None:
83
+ drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob
84
+ else:
85
+ drop_ids = force_drop_ids == 1
86
+ labels = torch.where(drop_ids, self.num_classes, labels)
87
+ return labels, drop_ids
88
+
89
+ def forward(self, labels, train, force_drop_ids=None):
90
+ use_dropout = self.dropout_prob > 0
91
+ if (train and use_dropout) or (force_drop_ids is not None):
92
+ labels,drop_ids = self.token_drop(labels, force_drop_ids)
93
+ embeddings = self.embedding_table(labels).unsqueeze(1)
94
+ if (train and use_dropout) or (force_drop_ids is not None):
95
+ return embeddings,drop_ids
96
+ else:
97
+ return embeddings
98
+
99
+
100
+ class ConditionEmbedder(nn.Module):
101
+ """
102
+ Embeds Condition into vector representations. Also handles label dropout for classifier-free guidance.
103
+ """
104
+ def __init__(self, in_channels, hidden_size, uncond_prob, token_num=120, vocab_size=16384):
105
+ super().__init__()
106
+ self.cap_proj = MLP(in_features=hidden_size, hidden_features=hidden_size, out_features=hidden_size)
107
+ self.register_buffer("uncond_embedding", torch.zeros(token_num, hidden_size) / hidden_size ** 0.5)
108
+ self.uncond_prob = uncond_prob
109
+
110
+ def token_drop(self, caption, force_drop_ids=None, drop_ids=None):
111
+ """
112
+ Drops labels to enable classifier-free guidance.
113
+ """
114
+ if force_drop_ids is None:
115
+ if drop_ids is None:
116
+ drop_ids = torch.rand(caption.shape[0], device=caption.device) < self.uncond_prob
117
+ else:
118
+ drop_ids = force_drop_ids == 1
119
+
120
+ caption = torch.where(drop_ids[:, None, None], self.uncond_embedding[:caption.shape[1]], caption)
121
+ return caption
122
+
123
+ def forward(self, caption, train, force_drop_ids=None, drop_ids=None):
124
+ use_dropout = self.uncond_prob > 0
125
+ if (train and use_dropout) or (force_drop_ids is not None):
126
+ caption = self.token_drop(caption, force_drop_ids, drop_ids)
127
+ embeddings = self.cap_proj(caption)
128
+ return embeddings
129
+
130
+ #################################################################################
131
+ # Embedding Layers for Text Feature #
132
+ #################################################################################
133
+ class CaptionEmbedder(nn.Module):
134
+ """
135
+ Embeds text caption into vector representations. Also handles label dropout for classifier-free guidance.
136
+ """
137
+ def __init__(self, in_channels, hidden_size, uncond_prob, token_num=120):
138
+ super().__init__()
139
+ self.cap_proj = MLP(in_features=in_channels, hidden_features=hidden_size, out_features=hidden_size)
140
+ self.register_buffer("uncond_embedding", nn.Parameter(torch.randn(token_num, in_channels) / in_channels ** 0.5))
141
+ self.uncond_prob = uncond_prob
142
+
143
+ def token_drop(self, caption, force_drop_ids=None):
144
+ """
145
+ Drops labels to enable classifier-free guidance.
146
+ """
147
+ if force_drop_ids is None:
148
+ drop_ids = torch.rand(caption.shape[0], device=caption.device) < self.uncond_prob
149
+ else:
150
+ drop_ids = force_drop_ids == 1
151
+ caption = torch.where(drop_ids[:, None, None], self.uncond_embedding, caption)
152
+ return caption, drop_ids
153
+
154
+ def forward(self, caption, train, force_drop_ids=None):
155
+ use_dropout = self.uncond_prob > 0
156
+ if (train and use_dropout) or (force_drop_ids is not None):
157
+ caption, drop_ids = self.token_drop(caption, force_drop_ids)
158
+ embeddings = self.cap_proj(caption)
159
+ if (train and use_dropout) or (force_drop_ids is not None):
160
+ return embeddings,drop_ids
161
+ else:
162
+ return embeddings
163
+
164
+
165
+ class MLP(nn.Module):
166
+ def __init__(self, in_features, hidden_features, out_features):
167
+ super().__init__()
168
+ out_features = out_features or in_features
169
+ hidden_features = hidden_features or in_features
170
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=False)
171
+ self.act = nn.GELU(approximate='tanh')
172
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=False)
173
+
174
+ nn.init.zeros_(self.fc1.weight)
175
+ nn.init.zeros_(self.fc2.weight)
176
+
177
+ def forward(self, x):
178
+ x = self.fc1(x)
179
+ x = self.act(x)
180
+ x = self.fc2(x)
181
+ return x
182
+
183
+
184
+ #################################################################################
185
+ # GPT Model #
186
+ #################################################################################
187
+ class RMSNorm(torch.nn.Module):
188
+ def __init__(self, dim: int, eps: float = 1e-5):
189
+ super().__init__()
190
+ self.eps = eps
191
+ self.weight = nn.Parameter(torch.ones(dim))
192
+
193
+ def _norm(self, x):
194
+ return x * torch.rsqrt(torch.mean(x * x, dim=-1, keepdim=True) + self.eps)
195
+
196
+ def forward(self, x):
197
+ output = self._norm(x.float()).type_as(x)
198
+ return output * self.weight
199
+
200
+
201
+ class FeedForward(nn.Module):
202
+ def __init__(self, config: ModelArgs):
203
+ super().__init__()
204
+ hidden_dim = 4 * config.dim
205
+ hidden_dim = int(2 * hidden_dim / 3)
206
+ # custom dim factor multiplier
207
+ if config.ffn_dim_multiplier is not None:
208
+ hidden_dim = int(config.ffn_dim_multiplier * hidden_dim)
209
+ hidden_dim = find_multiple(hidden_dim, config.multiple_of)
210
+
211
+ self.w1 = nn.Linear(config.dim, hidden_dim, bias=False)
212
+ self.w3 = nn.Linear(config.dim, hidden_dim, bias=False)
213
+ self.w2 = nn.Linear(hidden_dim, config.dim, bias=False)
214
+ self.ffn_dropout = nn.Dropout(config.ffn_dropout_p)
215
+
216
+ def forward(self, x):
217
+ return self.ffn_dropout(self.w2(F.silu(self.w1(x)) * self.w3(x)))
218
+
219
+
220
+ class KVCache(nn.Module):
221
+ def __init__(self, max_batch_size, max_seq_length, n_head, head_dim, dtype):
222
+ super().__init__()
223
+ cache_shape = (max_batch_size, n_head, max_seq_length, head_dim)
224
+ self.register_buffer('k_cache', torch.zeros(cache_shape, dtype=dtype))
225
+ self.register_buffer('v_cache', torch.zeros(cache_shape, dtype=dtype))
226
+
227
+ def update(self, input_pos, k_val, v_val):
228
+ # input_pos: [S], k_val: [B, H, S, D]
229
+ assert input_pos.shape[0] == k_val.shape[2]
230
+ k_out = self.k_cache
231
+ v_out = self.v_cache
232
+ k_out[:, :, input_pos] = k_val
233
+ v_out[:, :, input_pos] = v_val
234
+
235
+ return k_out, v_out
236
+
237
+
238
+ class Attention(nn.Module):
239
+ def __init__(self, config: ModelArgs):
240
+ super().__init__()
241
+ assert config.dim % config.n_head == 0
242
+ self.dim = config.dim
243
+ self.head_dim = config.dim // config.n_head
244
+ self.n_head = config.n_head
245
+ self.n_kv_head = config.n_kv_head if config.n_kv_head is not None else config.n_head
246
+ total_kv_dim = (self.n_head + 2 * self.n_kv_head) * self.head_dim
247
+
248
+ # key, query, value projections for all heads, but in a batch
249
+ self.wqkv = nn.Linear(config.dim, total_kv_dim, bias=False)
250
+ self.wo = nn.Linear(config.dim, config.dim, bias=False)
251
+ self.kv_cache = None
252
+
253
+ # regularization
254
+ self.attn_dropout_p = config.attn_dropout_p
255
+ self.resid_dropout = nn.Dropout(config.resid_dropout_p)
256
+
257
+ def forward(
258
+ self, x: torch.Tensor, freqs_cis: torch.Tensor = None,
259
+ input_pos: Optional[torch.Tensor] = None,
260
+ mask: Optional[torch.Tensor] = None
261
+ ):
262
+ bsz, seqlen, _ = x.shape
263
+ kv_size = self.n_kv_head * self.head_dim
264
+ xq, xk, xv = self.wqkv(x).split([self.dim, kv_size, kv_size], dim=-1)
265
+
266
+ xq = xq.view(bsz, seqlen, self.n_head, self.head_dim)
267
+ xk = xk.view(bsz, seqlen, self.n_kv_head, self.head_dim)
268
+ xv = xv.view(bsz, seqlen, self.n_kv_head, self.head_dim)
269
+
270
+ xq = apply_rotary_emb(xq, freqs_cis)
271
+ xk = apply_rotary_emb(xk, freqs_cis)
272
+
273
+ xq, xk, xv = map(lambda x: x.transpose(1, 2), (xq, xk, xv))
274
+
275
+ if self.kv_cache is not None:
276
+ keys, values = self.kv_cache.update(input_pos, xk, xv)
277
+ else:
278
+ keys, values = xk, xv
279
+ keys = keys.repeat_interleave(self.n_head // self.n_kv_head, dim=1)
280
+ values = values.repeat_interleave(self.n_head // self.n_kv_head, dim=1)
281
+
282
+ output = F.scaled_dot_product_attention(
283
+ xq, keys, values,
284
+ attn_mask=mask,
285
+ is_causal=True if mask is None else False, # is_causal=False is for KV cache
286
+ dropout_p=self.attn_dropout_p if self.training else 0)
287
+
288
+ output = output.transpose(1, 2).contiguous().view(bsz, seqlen, self.dim)
289
+
290
+ output = self.resid_dropout(self.wo(output))
291
+ return output
292
+
293
+
294
+ class TransformerBlock(nn.Module):
295
+ def __init__(self, config: ModelArgs, drop_path: float):
296
+ super().__init__()
297
+ self.attention = Attention(config)
298
+ self.feed_forward = FeedForward(config)
299
+ self.attention_norm = RMSNorm(config.dim, eps=config.norm_eps)
300
+ self.ffn_norm = RMSNorm(config.dim, eps=config.norm_eps)
301
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
302
+
303
+ def forward(
304
+ self, x: torch.Tensor, freqs_cis: torch.Tensor, start_pos: int, mask: Optional[torch.Tensor] = None):
305
+ h = x + self.drop_path(self.attention(self.attention_norm(x), freqs_cis, start_pos, mask))
306
+ out = h + self.drop_path(self.feed_forward(self.ffn_norm(h)))
307
+ return out
308
+
309
+
310
+ class Transformer(nn.Module):
311
+ def __init__(self, config: ModelArgs):
312
+ super().__init__()
313
+ self.config = config
314
+ self.vocab_size = config.vocab_size
315
+ self.n_layer = config.n_layer
316
+ self.block_size = config.block_size
317
+ self.num_classes = config.num_classes
318
+ self.model_type = config.model_type
319
+ self.cls_token_num = config.cls_token_num
320
+ self.layer_internal = config.n_layer // 3
321
+ # self.adapter = Adapter(output_dim=768)
322
+ # self.adapter = ViT_Adapter()
323
+ # self.adapter = DeiT_Adapter()
324
+ self.adapter = Dinov2_Adapter(adapter_size=config.adapter_size, condition_type=config.condition_type)
325
+ # self.adapter = EVA_Adapter()
326
+ if config.adapter_size == "small":
327
+ self.adapter_mlp = MLP(384, config.dim, config.dim)
328
+ elif config.adapter_size == 'base':
329
+ self.adapter_mlp = MLP(768, config.dim, config.dim)
330
+
331
+ if self.model_type == 'c2i':
332
+ self.cls_embedding = LabelEmbedder(config.num_classes, config.dim, config.class_dropout_prob)
333
+ elif self.model_type == 't2i':
334
+ self.cls_embedding = CaptionEmbedder(config.caption_dim, config.dim, config.class_dropout_prob)
335
+ else:
336
+ raise Exception("please check model type")
337
+ self.tok_embeddings = nn.Embedding(config.vocab_size, config.dim)
338
+ self.tok_dropout = nn.Dropout(config.token_dropout_p)
339
+
340
+ self.condition_embeddings = nn.Embedding(config.vocab_size, config.dim)
341
+ self.condition_mlp = ConditionEmbedder(self.block_size, config.dim, config.class_dropout_prob, self.block_size, config.vocab_size)
342
+ self.condition_layers = torch.nn.ModuleList()
343
+ for layer_id in range(3):
344
+ self.condition_layers.append(MLP(config.dim,config.dim,config.dim))
345
+
346
+ # transformer blocks
347
+ dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, config.n_layer)]
348
+ self.layers = torch.nn.ModuleList()
349
+ for layer_id in range(config.n_layer):
350
+ self.layers.append(TransformerBlock(config, dpr[layer_id]))
351
+
352
+ # output layer
353
+ self.norm = RMSNorm(config.dim, eps=config.norm_eps)
354
+ self.output = nn.Linear(config.dim, config.vocab_size, bias=False)
355
+
356
+ # 2d rotary pos embedding
357
+ grid_size = int(self.block_size ** 0.5)
358
+ assert grid_size * grid_size == self.block_size
359
+ self.freqs_cis = precompute_freqs_cis_2d(grid_size, self.config.dim // self.config.n_head, self.config.rope_base, self.cls_token_num)
360
+
361
+ # KVCache
362
+ self.max_batch_size = -1
363
+ self.max_seq_length = -1
364
+
365
+ self.initialize_weights()
366
+ self.condition_token = None
367
+ self.mask = get_causal_mask(256)
368
+ self.global_token = None
369
+
370
+ self.control_strength = 1
371
+
372
+ def initialize_weights(self):
373
+ # Initialize nn.Linear and nn.Embedding
374
+ self.apply(self._init_weights)
375
+
376
+ # Zero-out output layers:
377
+ nn.init.constant_(self.output.weight, 0)
378
+
379
+
380
+
381
+ def _init_weights(self, module):
382
+ std = self.config.initializer_range
383
+ if isinstance(module, nn.Linear):
384
+ module.weight.data.normal_(mean=0.0, std=std)
385
+ if module.bias is not None:
386
+ module.bias.data.zero_()
387
+ elif isinstance(module, nn.Embedding):
388
+ module.weight.data.normal_(mean=0.0, std=std)
389
+
390
+
391
+ def setup_caches(self, max_batch_size, max_seq_length, dtype):
392
+ # if self.max_seq_length >= max_seq_length and self.max_batch_size >= max_batch_size:
393
+ # return
394
+ head_dim = self.config.dim // self.config.n_head
395
+ max_seq_length = find_multiple(max_seq_length, 8) #
396
+ self.max_seq_length = max_seq_length
397
+ self.max_batch_size = max_batch_size
398
+ for b in self.layers:
399
+ b.attention.kv_cache = KVCache(max_batch_size, max_seq_length, self.config.n_head, head_dim, dtype)
400
+
401
+ causal_mask = torch.tril(torch.ones(self.max_seq_length, self.max_seq_length, dtype=torch.bool))
402
+ self.causal_mask = causal_mask.unsqueeze(0).repeat(self.max_batch_size, 1, 1)
403
+ grid_size = int(self.config.block_size ** 0.5)
404
+ assert grid_size * grid_size == self.block_size
405
+ self.freqs_cis = precompute_freqs_cis_2d(grid_size, self.config.dim // self.config.n_head, self.config.rope_base, self.cls_token_num)
406
+
407
+
408
+
409
+ def forward(
410
+ self,
411
+ idx: torch.Tensor,
412
+ cond_idx: torch.Tensor, # cond_idx_or_embed
413
+ input_pos: Optional[torch.Tensor] = None,
414
+ targets: Optional[torch.Tensor] = None,
415
+ mask: Optional[torch.Tensor] = None,
416
+ valid: Optional[torch.Tensor] = None,
417
+ condition: Optional[torch.Tensor] = None,
418
+ control_strength: Optional[int] = 1
419
+ ):
420
+ if idx is not None and cond_idx is not None: # training or naive inference
421
+ cond_embeddings, drop_ids = self.cls_embedding(cond_idx, train=self.training)  # build the condition (class/text) embedding
422
+ cond_embeddings = cond_embeddings[:,:self.cls_token_num]
423
+ token_embeddings = self.tok_embeddings(idx)
424
+ if condition is not None:
425
+ condition_embeddings = self.adapter(condition)
426
+ condition_embeddings = self.adapter_mlp(condition_embeddings)
427
+ self.condition_token = self.condition_mlp(condition_embeddings,train=self.training, drop_ids=drop_ids)
428
+ token_embeddings = torch.cat((cond_embeddings, token_embeddings), dim=1)
429
+
430
+ h = self.tok_dropout(token_embeddings)
431
+ self.freqs_cis = self.freqs_cis.to(h.device)
432
+ else:
433
+ if cond_idx is not None: # prefill in inference
434
+ self.control_strength = control_strength
435
+ token_embeddings = self.cls_embedding(cond_idx, train=self.training)
436
+ token_embeddings = token_embeddings[:,:self.cls_token_num]
437
+ if condition is not None:
438
+ condition_embeddings = self.condition_mlp(condition, train=self.training)  # previously: self.condition_mlp(condition.to(torch.bfloat16), train=self.training)
439
+ self.condition_token = condition_embeddings
440
+ self.condition_token = [self.condition_layers[0](self.condition_token),
441
+ self.condition_layers[1](self.condition_token),
442
+ self.condition_layers[2](self.condition_token)]
443
+
444
+ else: # decode_n_tokens(kv cache) in inference
445
+ token_embeddings = self.tok_embeddings(idx)
446
+ bs = token_embeddings.shape[0]
447
+ mask = self.causal_mask[:bs, None, input_pos]
448
+ h = self.tok_dropout(token_embeddings)
449
+ self.freqs_cis = self.freqs_cis
450
+
451
+ if self.training:
452
+ freqs_cis = self.freqs_cis[:token_embeddings.shape[1]]
453
+ else:
454
+ freqs_cis = self.freqs_cis[input_pos]
455
+ # transformer blocks
456
+ for i, layer in enumerate(self.layers):
457
+ if i%self.layer_internal == 0:
458
+ if self.training:
459
+ h[:, self.cls_token_num-1:] = h[:, self.cls_token_num-1:] + self.condition_layers[i//self.layer_internal](self.condition_token)
460
+ else:
461
+ if len(input_pos)>1:
462
+ # h[:, -1:] = h[:, -1:] + self.condition_layers[i//self.layer_internal](self.condition_token[:,0:1])
463
+ h[:,-1:] = h[:, -1:] + self.control_strength*self.condition_token[i//self.layer_internal][:,0:1]
464
+ else:
465
+ # h = h + self.condition_layers[i//self.layer_internal](self.condition_token[:,input_pos-self.cls_token_num+1])
466
+ h = h + self.control_strength*self.condition_token[i//self.layer_internal][:,input_pos-self.cls_token_num+1]
467
+ h = layer(h, freqs_cis, input_pos, mask)
468
+ # output layers
469
+ h = self.norm(h)
470
+ logits = self.output(h).float()
471
+
472
+ if self.training:
473
+ logits = logits[:, self.cls_token_num - 1:].contiguous()
474
+ # if we are given some desired targets also calculate the loss
475
+ loss = None
476
+ if valid is not None:
477
+ loss_all = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), reduction='none')
478
+ valid_all = valid[:,None].repeat(1, targets.shape[1]).view(-1)
479
+ loss = (loss_all * valid_all).sum() / max(valid_all.sum(), 1)
480
+ elif targets is not None:
481
+ loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
482
+
483
+
484
+ return logits, loss
485
+
486
+
487
+ def get_fsdp_wrap_module_list(self) -> List[nn.Module]:
488
+ return list(self.layers)
489
+
490
+
491
+
492
+ #################################################################################
493
+ # Rotary Positional Embedding Functions #
494
+ #################################################################################
495
+ # https://github.com/pytorch-labs/gpt-fast/blob/main/model.py
496
+ def precompute_freqs_cis(seq_len: int, n_elem: int, base: int = 10000, cls_token_num=120):
497
+ freqs = 1.0 / (base ** (torch.arange(0, n_elem, 2)[: (n_elem // 2)].float() / n_elem))
498
+ t = torch.arange(seq_len, device=freqs.device)
499
+ freqs = torch.outer(t, freqs) # (seq_len, head_dim // 2)
500
+ freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
501
+ cache = torch.stack([freqs_cis.real, freqs_cis.imag], dim=-1) # (cls_token_num+seq_len, head_dim // 2, 2)
502
+ cond_cache = torch.cat([torch.zeros(cls_token_num, n_elem // 2, 2), cache]) # (cls_token_num+seq_len, head_dim // 2, 2)
503
+ return cond_cache
504
+
505
+
506
+ def precompute_freqs_cis_2d(grid_size: int, n_elem: int, base: int = 10000, cls_token_num=120):
507
+ # split the dimension into half, one for x and one for y
508
+ half_dim = n_elem // 2
509
+ freqs = 1.0 / (base ** (torch.arange(0, half_dim, 2)[: (half_dim // 2)].float() / half_dim))
510
+ t = torch.arange(grid_size, device=freqs.device)
511
+ freqs = torch.outer(t, freqs) # (grid_size, head_dim // 2)
512
+ freqs_grid = torch.concat([
513
+ freqs[:, None, :].expand(-1, grid_size, -1),
514
+ freqs[None, :, :].expand(grid_size, -1, -1),
515
+ ], dim=-1) # (grid_size, grid_size, head_dim // 2)
516
+ cache_grid = torch.stack([torch.cos(freqs_grid), torch.sin(freqs_grid)], dim=-1) # (grid_size, grid_size, head_dim // 2, 2)
517
+ cache = cache_grid.flatten(0, 1)
518
+ cond_cache = torch.cat([torch.zeros(cls_token_num, n_elem // 2, 2), cache]) # (cls_token_num+grid_size**2, head_dim // 2, 2)
519
+ return cond_cache
520
+
521
+
522
+ def apply_rotary_emb(x: torch.Tensor, freqs_cis: torch.Tensor):
523
+ # x: (bs, seq_len, n_head, head_dim)
524
+ # freqs_cis (seq_len, head_dim // 2, 2)
525
+ xshaped = x.float().reshape(*x.shape[:-1], -1, 2) # (bs, seq_len, n_head, head_dim//2, 2)
526
+ freqs_cis = freqs_cis.view(1, xshaped.size(1), 1, xshaped.size(3), 2) # (1, seq_len, 1, head_dim//2, 2)
527
+ x_out2 = torch.stack([
528
+ xshaped[..., 0] * freqs_cis[..., 0] - xshaped[..., 1] * freqs_cis[..., 1],
529
+ xshaped[..., 1] * freqs_cis[..., 0] + xshaped[..., 0] * freqs_cis[..., 1],
530
+ ], dim=-1)
531
+ x_out2 = x_out2.flatten(3)
532
+ return x_out2.type_as(x)
533
+
534
+
535
+
536
+ #################################################################################
537
+ # GPT Configs #
538
+ #################################################################################
539
+ ### text-conditional
540
+ def GPT_7B(**kwargs):
541
+ return Transformer(ModelArgs(n_layer=32, n_head=32, dim=4096, **kwargs)) # 6.6B
542
+
543
+ def GPT_3B(**kwargs):
544
+ return Transformer(ModelArgs(n_layer=24, n_head=32, dim=3200, **kwargs)) # 3.1B
545
+
546
+ def GPT_1B(**kwargs):
547
+ return Transformer(ModelArgs(n_layer=22, n_head=32, dim=2048, **kwargs)) # 1.2B
548
+
549
+ ### class-conditional
550
+ def GPT_XXXL(**kwargs):
551
+ return Transformer(ModelArgs(n_layer=48, n_head=40, dim=2560, **kwargs)) # 3.9B
552
+
553
+ def GPT_XXL(**kwargs):
554
+ return Transformer(ModelArgs(n_layer=48, n_head=24, dim=1536, **kwargs)) # 1.4B
555
+
556
+ def GPT_XL(**kwargs):
557
+ return Transformer(ModelArgs(n_layer=36, n_head=20, dim=1280, **kwargs)) # 775M
558
+
559
+ def GPT_L(**kwargs):
560
+ return Transformer(ModelArgs(n_layer=24, n_head=16, dim=1024, **kwargs)) # 343M
561
+
562
+ def GPT_B(**kwargs):
563
+ return Transformer(ModelArgs(n_layer=12, n_head=12, dim=768, **kwargs)) # 111M
564
+
565
+
566
+ GPT_models = {
567
+ 'GPT-B': GPT_B, 'GPT-L': GPT_L, 'GPT-XL': GPT_XL, 'GPT-XXL': GPT_XXL, 'GPT-XXXL': GPT_XXXL,
568
+ 'GPT-1B': GPT_1B, 'GPT-3B': GPT_3B, 'GPT-7B': GPT_7B,
569
+ }
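One detail worth calling out in gpt_t2i.py: the `drop_ids` returned by the caption embedder is passed straight into `ConditionEmbedder`, so a sample whose text prompt is dropped for classifier-free guidance also has its spatial condition dropped. Below is a minimal sketch of that shared-mask behaviour; the tensor shapes and the all-zero `uncond` rows are assumptions for the demo, not the repo's buffers.

```python
import torch

torch.manual_seed(0)
bsz, cap_len, cond_len, dim = 4, 120, 256, 768
uncond_prob = 0.5

caption   = torch.randn(bsz, cap_len, dim)
condition = torch.randn(bsz, cond_len, dim)
uncond_caption   = torch.zeros(cap_len, dim)     # stands in for the caption uncond_embedding
uncond_condition = torch.zeros(cond_len, dim)    # stands in for the condition uncond_embedding

drop_ids = torch.rand(bsz) < uncond_prob         # decided once, on the caption side
caption   = torch.where(drop_ids[:, None, None], uncond_caption, caption)
condition = torch.where(drop_ids[:, None, None], uncond_condition, condition)

# every sample whose caption was dropped also had its condition dropped
cond_dropped = condition.abs().sum(dim=(1, 2)) == 0
print(drop_ids.tolist(), torch.equal(cond_dropped, drop_ids))
```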
autoregressive/models/vit_adapter.py ADDED
@@ -0,0 +1,26 @@
1
+ from transformers import AutoImageProcessor, AutoModel
2
+ from PIL import Image
3
+ import requests
4
+ import torch
5
+ import torch.nn as nn
6
+
7
+
8
+ class ViT_Adapter(nn.Module):
9
+ def __init__(self, input_dim=3, output_dim=768, attention=False, pool=False, nheads=8, dropout=0.1):
10
+ super(ViT_Adapter, self).__init__()
11
+ self.model = AutoModel.from_pretrained('autoregressive/models/vit-small')
12
+
13
+ def forward(self, x):
14
+ x = self.model(x,interpolate_pos_encoding=True)
15
+ return x.last_hidden_state[:, 1:]
16
+
17
+
18
+ if __name__ == '__main__':
19
+ model = ViT_Adapter().cuda()
20
+ import pdb;pdb.set_trace()
21
+ print(sum(p.numel() for p in model.parameters()))
22
+ inputs = torch.randn(4,3,512,512).cuda()
23
+
24
+ outputs = model(inputs)
25
+
26
+ print(outputs.shape)
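The adapter above calls the backbone with `interpolate_pos_encoding=True`, which is what lets the control image be larger than the ViT's pre-training resolution, and it keeps only the patch tokens (the CLS token at index 0 is dropped). A quick sketch of the resulting token count, assuming a 16x16 patch size; the actual checkpoint under autoregressive/models/vit-small may use a different patch size.

```python
def num_patch_tokens(image_size: int, patch_size: int = 16) -> int:
    """Number of patch tokens a ViT produces for a square input (CLS token excluded)."""
    assert image_size % patch_size == 0
    return (image_size // patch_size) ** 2

# e.g. a (4, 3, 512, 512) control image -> (4, 1024, hidden_dim) after dropping CLS
for size in (256, 512):
    print(size, "->", num_patch_tokens(size), "patch tokens")
```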
autoregressive/sample/sample_c2i_ddp.py ADDED
@@ -0,0 +1,188 @@
1
+ # Modified from:
2
+ # DiT: https://github.com/facebookresearch/DiT/blob/main/sample_ddp.py
3
+ import torch
4
+ torch.backends.cuda.matmul.allow_tf32 = True
5
+ torch.backends.cudnn.allow_tf32 = True
6
+ import torch.nn.functional as F
7
+ import torch.distributed as dist
8
+
9
+ from tqdm import tqdm
10
+ import os
11
+ from PIL import Image
12
+ import numpy as np
13
+ import math
14
+ import argparse
15
+
16
+ from tokenizer.tokenizer_image.vq_model import VQ_models
17
+ from autoregressive.models.gpt import GPT_models
18
+ from autoregressive.models.generate import generate
19
+
20
+
21
+ def create_npz_from_sample_folder(sample_dir, num=50_000):
22
+ """
23
+ Builds a single .npz file from a folder of .png samples.
24
+ """
25
+ samples = []
26
+ for i in tqdm(range(num), desc="Building .npz file from samples"):
27
+ sample_pil = Image.open(f"{sample_dir}/{i:06d}.png")
28
+ sample_np = np.asarray(sample_pil).astype(np.uint8)
29
+ samples.append(sample_np)
30
+ samples = np.stack(samples)
31
+ assert samples.shape == (num, samples.shape[1], samples.shape[2], 3)
32
+ npz_path = f"{sample_dir}.npz"
33
+ np.savez(npz_path, arr_0=samples)
34
+ print(f"Saved .npz file to {npz_path} [shape={samples.shape}].")
35
+ return npz_path
36
+
37
+
38
+ def main(args):
39
+ # Setup PyTorch:
40
+ assert torch.cuda.is_available(), "Sampling with DDP requires at least one GPU (use sample.py for CPU-only usage)"
41
+ torch.set_grad_enabled(False)
42
+
43
+ # Setup DDP:
44
+ dist.init_process_group("nccl")
45
+ rank = dist.get_rank()
46
+ device = rank % torch.cuda.device_count()
47
+ seed = args.global_seed * dist.get_world_size() + rank
48
+ torch.manual_seed(seed)
49
+ torch.cuda.set_device(device)
50
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
51
+
52
+ # create and load model
53
+ vq_model = VQ_models[args.vq_model](
54
+ codebook_size=args.codebook_size,
55
+ codebook_embed_dim=args.codebook_embed_dim)
56
+ vq_model.to(device)
57
+ vq_model.eval()
58
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
59
+ vq_model.load_state_dict(checkpoint["model"])
60
+ del checkpoint
61
+
62
+ # create and load gpt model
63
+ precision = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.precision]
64
+ latent_size = args.image_size // args.downsample_size
65
+ gpt_model = GPT_models[args.gpt_model](
66
+ vocab_size=args.codebook_size,
67
+ block_size=latent_size ** 2,
68
+ num_classes=args.num_classes,
69
+ cls_token_num=args.cls_token_num,
70
+ model_type=args.gpt_type,
71
+ ).to(device=device, dtype=precision)
72
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
73
+ if args.from_fsdp: # fsdp
74
+ model_weight = checkpoint
75
+ elif "model" in checkpoint: # ddp
76
+ model_weight = checkpoint["model"]
77
+ elif "module" in checkpoint: # deepspeed
78
+ model_weight = checkpoint["module"]
79
+ elif "state_dict" in checkpoint:
80
+ model_weight = checkpoint["state_dict"]
81
+ else:
82
+ raise Exception("please check model weight, maybe add --from-fsdp to run command")
83
+ # if 'freqs_cis' in model_weight:
84
+ # model_weight.pop('freqs_cis')
85
+ gpt_model.load_state_dict(model_weight, strict=False)
86
+ gpt_model.eval()
87
+ del checkpoint
88
+
89
+ if args.compile:
90
+ print(f"compiling the model...")
91
+ gpt_model = torch.compile(
92
+ gpt_model,
93
+ mode="reduce-overhead",
94
+ fullgraph=True
95
+ ) # requires PyTorch 2.0 (optional)
96
+ else:
97
+ print("not compiling the model")
98
+
99
+ # Create folder to save samples:
100
+ model_string_name = args.gpt_model.replace("/", "-")
101
+ if args.from_fsdp:
102
+ ckpt_string_name = args.gpt_ckpt.split('/')[-2]
103
+ else:
104
+ ckpt_string_name = os.path.basename(args.gpt_ckpt).replace(".pth", "").replace(".pt", "")
105
+ folder_name = f"{model_string_name}-{ckpt_string_name}-size-{args.image_size}-size-{args.image_size_eval}-{args.vq_model}-" \
106
+ f"topk-{args.top_k}-topp-{args.top_p}-temperature-{args.temperature}-" \
107
+ f"cfg-{args.cfg_scale}-seed-{args.global_seed}"
108
+ sample_folder_dir = f"{args.sample_dir}/{folder_name}"
109
+ if rank == 0:
110
+ os.makedirs(sample_folder_dir, exist_ok=True)
111
+ print(f"Saving .png samples at {sample_folder_dir}")
112
+ dist.barrier()
113
+
114
+ # Figure out how many samples we need to generate on each GPU and how many iterations we need to run:
115
+ n = args.per_proc_batch_size
116
+ global_batch_size = n * dist.get_world_size()
117
+ # To make things evenly-divisible, we'll sample a bit more than we need and then discard the extra samples:
118
+ total_samples = int(math.ceil(args.num_fid_samples / global_batch_size) * global_batch_size)
119
+ if rank == 0:
120
+ print(f"Total number of images that will be sampled: {total_samples}")
121
+ assert total_samples % dist.get_world_size() == 0, "total_samples must be divisible by world_size"
122
+ samples_needed_this_gpu = int(total_samples // dist.get_world_size())
123
+ assert samples_needed_this_gpu % n == 0, "samples_needed_this_gpu must be divisible by the per-GPU batch size"
124
+ iterations = int(samples_needed_this_gpu // n)
125
+ pbar = range(iterations)
126
+ pbar = tqdm(pbar) if rank == 0 else pbar
127
+ total = 0
128
+ for _ in pbar:
129
+ # Sample inputs:
130
+ c_indices = torch.randint(0, args.num_classes, (n,), device=device)
131
+ qzshape = [len(c_indices), args.codebook_embed_dim, latent_size, latent_size]
132
+
133
+ index_sample = generate(
134
+ gpt_model, c_indices, latent_size ** 2,
135
+ cfg_scale=args.cfg_scale, cfg_interval=args.cfg_interval,
136
+ temperature=args.temperature, top_k=args.top_k,
137
+ top_p=args.top_p, sample_logits=True,
138
+ )
139
+
140
+ samples = vq_model.decode_code(index_sample, qzshape) # output value is between [-1, 1]
141
+ if args.image_size_eval != args.image_size:
142
+ samples = F.interpolate(samples, size=(args.image_size_eval, args.image_size_eval), mode='bicubic')
143
+ samples = torch.clamp(127.5 * samples + 128.0, 0, 255).permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy()
144
+
145
+ # Save samples to disk as individual .png files
146
+ for i, sample in enumerate(samples):
147
+ index = i * dist.get_world_size() + rank + total
148
+ Image.fromarray(sample).save(f"{sample_folder_dir}/{index:06d}.png")
149
+ total += global_batch_size
150
+
151
+ # Make sure all processes have finished saving their samples before attempting to convert to .npz
152
+ dist.barrier()
153
+ if rank == 0:
154
+ create_npz_from_sample_folder(sample_folder_dir, args.num_fid_samples)
155
+ print("Done.")
156
+ dist.barrier()
157
+ dist.destroy_process_group()
158
+
159
+
160
+
161
+ if __name__ == "__main__":
162
+ parser = argparse.ArgumentParser()
163
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-B")
164
+ parser.add_argument("--gpt-ckpt", type=str, default=None)
165
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="c2i", help="class-conditional or text-conditional")
166
+ parser.add_argument("--from-fsdp", action='store_true')
167
+ parser.add_argument("--cls-token-num", type=int, default=1, help="max token number of condition input")
168
+ parser.add_argument("--precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
169
+ parser.add_argument("--compile", action='store_true', default=True)
170
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
171
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
172
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
173
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
174
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=384)
175
+ parser.add_argument("--image-size-eval", type=int, choices=[256, 384, 512], default=256)
176
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
177
+ parser.add_argument("--num-classes", type=int, default=1000)
178
+ parser.add_argument("--cfg-scale", type=float, default=1.5)
179
+ parser.add_argument("--cfg-interval", type=float, default=-1)
180
+ parser.add_argument("--sample-dir", type=str, default="samples")
181
+ parser.add_argument("--per-proc-batch-size", type=int, default=32)
182
+ parser.add_argument("--num-fid-samples", type=int, default=5000)
183
+ parser.add_argument("--global-seed", type=int, default=0)
184
+ parser.add_argument("--top-k", type=int, default=0,help="top-k value to sample with")
185
+ parser.add_argument("--temperature", type=float, default=1.0, help="temperature value to sample with")
186
+ parser.add_argument("--top-p", type=float, default=1.0, help="top-p value to sample with")
187
+ args = parser.parse_args()
188
+ main(args)
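The sampling loop above names each image with `index = i * world_size + rank + total`, advancing `total` by the global batch size every iteration; that is what lets every rank write its own .png files without collisions before the .npz is assembled. A pure-Python sketch verifying the scheme (the sizes are arbitrary):

```python
world_size, per_proc_batch, iterations = 4, 8, 3
global_batch = per_proc_batch * world_size

indices = []
for rank in range(world_size):          # what each rank would write
    total = 0
    for _ in range(iterations):
        for i in range(per_proc_batch):
            indices.append(i * world_size + rank + total)
        total += global_batch

# disjoint across ranks and contiguous overall -> safe to build the .npz from 000000.png upward
assert sorted(indices) == list(range(world_size * per_proc_batch * iterations))
print(len(indices), "unique sample indices")
```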
autoregressive/sample/sample_t2i_ddp.py ADDED
@@ -0,0 +1,229 @@
1
+ import torch
2
+ torch.backends.cuda.matmul.allow_tf32 = True
3
+ torch.backends.cudnn.allow_tf32 = True
4
+ torch.set_float32_matmul_precision('high')
5
+ setattr(torch.nn.Linear, 'reset_parameters', lambda self: None) # disable default parameter init for faster speed
6
+ setattr(torch.nn.LayerNorm, 'reset_parameters', lambda self: None) # disable default parameter init for faster speed
7
+ import torch.nn.functional as F
8
+ import torch.distributed as dist
9
+
10
+ import os
11
+ import math
12
+ import json
13
+ import argparse
14
+ import pandas as pd
15
+ from tqdm import tqdm
16
+ from PIL import Image
17
+
18
+ from tokenizer.tokenizer_image.vq_model import VQ_models
19
+ from language.t5 import T5Embedder
20
+ from autoregressive.models.gpt import GPT_models
21
+ from autoregressive.models.generate import generate
22
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
23
+
24
+
25
+
26
+ def main(args):
27
+ # Setup PyTorch:
28
+ assert torch.cuda.is_available(), "Sampling with DDP requires at least one GPU (use sample.py for CPU-only usage)"
29
+ torch.set_grad_enabled(False)
30
+
31
+ # Setup DDP:
32
+ dist.init_process_group("nccl")
33
+ rank = dist.get_rank()
34
+ device = rank % torch.cuda.device_count()
35
+ seed = args.global_seed * dist.get_world_size() + rank
36
+ torch.manual_seed(seed)
37
+ torch.cuda.set_device(device)
38
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
39
+
40
+ # create and load model
41
+ vq_model = VQ_models[args.vq_model](
42
+ codebook_size=args.codebook_size,
43
+ codebook_embed_dim=args.codebook_embed_dim)
44
+ vq_model.to(device)
45
+ vq_model.eval()
46
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
47
+ vq_model.load_state_dict(checkpoint["model"])
48
+ del checkpoint
49
+ print(f"image tokenizer is loaded")
50
+
51
+ # create and load gpt model
52
+ precision = {'none': torch.float32, 'bf16': torch.bfloat16, 'fp16': torch.float16}[args.precision]
53
+ latent_size = args.image_size // args.downsample_size
54
+ gpt_model = GPT_models[args.gpt_model](
55
+ block_size=latent_size ** 2,
56
+ cls_token_num=args.cls_token_num,
57
+ model_type=args.gpt_type,
58
+ ).to(device=device, dtype=precision)
59
+
60
+ checkpoint = torch.load(args.gpt_ckpt, map_location="cpu")
61
+
62
+ if "model" in checkpoint: # ddp
63
+ model_weight = checkpoint["model"]
64
+ elif "module" in checkpoint: # deepspeed
65
+ model_weight = checkpoint["module"]
66
+ elif "state_dict" in checkpoint:
67
+ model_weight = checkpoint["state_dict"]
68
+ else:
69
+ raise Exception("please check model weight")
70
+ gpt_model.load_state_dict(model_weight, strict=False)
71
+ gpt_model.eval()
72
+ del checkpoint
73
+ print(f"gpt model is loaded")
74
+
75
+ if args.compile:
76
+ print(f"compiling the model...")
77
+ gpt_model = torch.compile(
78
+ gpt_model,
79
+ mode="reduce-overhead",
80
+ fullgraph=True
81
+ ) # requires PyTorch 2.0 (optional)
82
+ else:
83
+ print(f"no need to compile model in demo")
84
+
85
+ assert os.path.exists(args.t5_path)
86
+ t5_model = T5Embedder(
87
+ device=device,
88
+ local_cache=True,
89
+ cache_dir=args.t5_path,
90
+ dir_or_name=args.t5_model_type,
91
+ torch_dtype=precision,
92
+ model_max_length=args.t5_feature_max_len,
93
+ )
94
+ print(f"t5 model is loaded")
95
+
96
+ # Create folder to save samples:
97
+ model_string_name = args.gpt_model.replace("/", "-")
98
+ ckpt_string_name = os.path.basename(args.gpt_ckpt).replace(".pth", "").replace(".pt", "")
99
+ prompt_name = args.prompt_csv.split('/')[-1].split('.')[0].lower()
100
+ folder_name = f"{model_string_name}-{ckpt_string_name}-{prompt_name}-size-{args.image_size}-size-{args.image_size}-{args.vq_model}-" \
101
+ f"topk-{args.top_k}-topp-{args.top_p}-temperature-{args.temperature}-" \
102
+ f"cfg-{args.cfg_scale}-seed-{args.global_seed}"
103
+ sample_folder_dir = f"{args.sample_dir}/{folder_name}"
104
+ if rank == 0:
105
+ os.makedirs(f"{sample_folder_dir}/images", exist_ok=True)
106
+ print(f"Saving .png samples at {sample_folder_dir}/images")
107
+ dist.barrier()
108
+
109
+ df = pd.read_csv(args.prompt_csv, delimiter='\t')
110
+ prompt_list = df['Prompt'].tolist()
111
+
112
+ # Figure out how many samples we need to generate on each GPU and how many iterations we need to run:
113
+ n = args.per_proc_batch_size
114
+ global_batch_size = n * dist.get_world_size()
115
+ num_fid_samples = min(args.num_fid_samples, len(prompt_list))
116
+ # To make things evenly-divisible, we'll sample a bit more than we need and then discard the extra samples:
117
+ total_samples = int(math.ceil(num_fid_samples / global_batch_size) * global_batch_size)
118
+ if rank == 0:
119
+ print(f"Total number of images that will be sampled: {total_samples}")
120
+ assert total_samples % dist.get_world_size() == 0, "total_samples must be divisible by world_size"
121
+ samples_needed_this_gpu = int(total_samples // dist.get_world_size())
122
+ assert samples_needed_this_gpu % n == 0, "samples_needed_this_gpu must be divisible by the per-GPU batch size"
123
+ iterations = int(samples_needed_this_gpu // n)
124
+ pbar = range(iterations)
125
+ pbar = tqdm(pbar) if rank == 0 else pbar
126
+ total = 0
127
+ for _ in pbar:
128
+ # Select text prompt
129
+ prompt_batch = []
130
+ for i in range(n):
131
+ index = i * dist.get_world_size() + rank + total
132
+ prompt_batch.append(prompt_list[index] if index < len(prompt_list) else "a cute dog")
133
+
134
+ # Sample inputs:
135
+ caption_embs, emb_masks = t5_model.get_text_embeddings(prompt_batch)
136
+
137
+ if not args.no_left_padding:
138
+ new_emb_masks = torch.flip(emb_masks, dims=[-1])
139
+ new_caption_embs = []
140
+ for idx, (caption_emb, emb_mask) in enumerate(zip(caption_embs, emb_masks)):
141
+ valid_num = int(emb_mask.sum().item())
142
+ # prompt_cur = prompt_batch[idx]
143
+ # print(f' prompt {idx} token len: {valid_num} : {prompt_cur}')
144
+ new_caption_emb = torch.cat([caption_emb[valid_num:], caption_emb[:valid_num]])
145
+ new_caption_embs.append(new_caption_emb)
146
+ new_caption_embs = torch.stack(new_caption_embs)
147
+
148
+ else:
149
+ new_caption_embs, new_emb_masks = caption_embs, emb_masks
150
+
151
+ c_indices = new_caption_embs * new_emb_masks[:,:, None]
152
+ c_emb_masks = new_emb_masks
153
+
154
+ qzshape = [len(c_indices), args.codebook_embed_dim, latent_size, latent_size]
155
+ index_sample = generate(
156
+ gpt_model, c_indices, latent_size ** 2,
157
+ c_emb_masks,
158
+ cfg_scale=args.cfg_scale,
159
+ temperature=args.temperature, top_k=args.top_k,
160
+ top_p=args.top_p, sample_logits=True,
161
+ )
162
+
163
+ samples = vq_model.decode_code(index_sample, qzshape) # output value is between [-1, 1]
164
+ samples = torch.clamp(127.5 * samples + 128.0, 0, 255).permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy()
165
+
166
+ # Save samples to disk as individual .png files
167
+ for i, sample in enumerate(samples):
168
+ index = i * dist.get_world_size() + rank + total
169
+ Image.fromarray(sample).save(f"{sample_folder_dir}/images/{index:06d}.png")
170
+ total += global_batch_size
171
+
172
+ # Make sure all processes have finished saving their samples before attempting to convert to .npz
173
+ dist.barrier()
174
+ if rank == 0:
175
+ # Save infer result in a jsonl file
176
+ json_items = []
177
+ for idx, prompt in enumerate(prompt_list):
178
+ image_path = os.path.join(sample_folder_dir, "images", f"{idx:06d}.png")
179
+ json_items.append({"text": prompt, "image_path": image_path})
180
+ res_jsonl_path = os.path.join(sample_folder_dir, "result.jsonl")
181
+ print(f"Save jsonl to {res_jsonl_path}...")
182
+ with open(res_jsonl_path, "w") as f:
183
+ for item in json_items:
184
+ f.write(json.dumps(item) + "\n")
185
+
186
+ # Save captions to txt
187
+ caption_path = os.path.join(sample_folder_dir, "captions.txt")
188
+ print(f"Save captions to {caption_path}...")
189
+ with open(caption_path, "w") as f:
190
+ for item in prompt_list:
191
+ f.write(f"{item}\n")
192
+ print("Done.")
193
+
194
+ dist.barrier()
195
+ dist.destroy_process_group()
196
+
197
+
198
+
199
+ if __name__ == "__main__":
200
+ parser = argparse.ArgumentParser()
201
+ parser.add_argument("--prompt-csv", type=str, default='evaluations/t2i/PartiPrompts.tsv')
202
+ parser.add_argument("--t5-path", type=str, default='pretrained_models/t5-ckpt')
203
+ parser.add_argument("--t5-model-type", type=str, default='flan-t5-xl')
204
+ parser.add_argument("--t5-feature-max-len", type=int, default=120)
205
+ parser.add_argument("--t5-feature-dim", type=int, default=2048)
206
+ parser.add_argument("--no-left-padding", action='store_true', default=False)
207
+ parser.add_argument("--gpt-model", type=str, choices=list(GPT_models.keys()), default="GPT-XL")
208
+ parser.add_argument("--gpt-ckpt", type=str, default=None)
209
+ parser.add_argument("--gpt-type", type=str, choices=['c2i', 't2i'], default="t2i", help="class->image or text->image")
210
+ parser.add_argument("--cls-token-num", type=int, default=120, help="max token number of condition input")
211
+ parser.add_argument("--precision", type=str, default='bf16', choices=["none", "fp16", "bf16"])
212
+ parser.add_argument("--compile", action='store_true', default=False)
213
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
214
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
215
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
216
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
217
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=512)
218
+ parser.add_argument("--downsample-size", type=int, choices=[8, 16], default=16)
219
+ parser.add_argument("--num-classes", type=int, default=1000)
220
+ parser.add_argument("--cfg-scale", type=float, default=7.5)
221
+ parser.add_argument("--sample-dir", type=str, default="samples_parti", help="samples_coco or samples_parti")
222
+ parser.add_argument("--per-proc-batch-size", type=int, default=32)
223
+ parser.add_argument("--num-fid-samples", type=int, default=30000)
224
+ parser.add_argument("--global-seed", type=int, default=0)
225
+ parser.add_argument("--top-k", type=int, default=1000, help="top-k value to sample with")
226
+ parser.add_argument("--temperature", type=float, default=1.0, help="temperature value to sample with")
227
+ parser.add_argument("--top-p", type=float, default=1.0, help="top-p value to sample with")
228
+ args = parser.parse_args()
229
+ main(args)
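
For readers following the sharding logic in the sampling loop above, here is a minimal standalone sketch (not part of the committed file) of the even-divisibility arithmetic it relies on. The world size, per-process batch size, and prompt count below are illustrative assumptions, not values from a specific run.

```python
# A standalone sketch of the sharding arithmetic in sample_t2i_ddp.py above.
# Numbers are illustrative assumptions: 8 GPUs, 32 images per process, 30000 prompts.
import math

world_size = 8
per_proc_batch_size = 32
num_fid_samples = 30000

global_batch_size = per_proc_batch_size * world_size                                # 256
total_samples = math.ceil(num_fid_samples / global_batch_size) * global_batch_size  # 30208
samples_per_gpu = total_samples // world_size                                        # 3776
iterations = samples_per_gpu // per_proc_batch_size                                  # 118

# Within one iteration, rank r writes indices i * world_size + r + total,
# so ranks interleave image indices rather than taking contiguous chunks.
total, rank = 0, 0
indices = [i * world_size + rank + total for i in range(per_proc_batch_size)]
print(global_batch_size, total_samples, samples_per_gpu, iterations)
print(indices[:4])  # [0, 8, 16, 24]
```

The handful of extra images sampled beyond `num_fid_samples` fall back to the placeholder prompt and are simply not referenced when the per-prompt `result.jsonl` is written.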
autoregressive/serve/llm.py ADDED
@@ -0,0 +1,267 @@
1
+ # Modified from:
2
+ # vLLM: https://github.com/vllm-project/vllm/blob/main/vllm/entrypoints/llm.py
3
+ from typing import List, Optional, Union
4
+ import argparse
5
+
6
+ import torch
7
+ from tqdm import tqdm
8
+ from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast
9
+
10
+ from vllm.engine.arg_utils import EngineArgs
11
+ # from vllm.engine.llm_engine import LLMEngine
12
+ from vllm.lora.request import LoRARequest
13
+ from vllm.outputs import RequestOutput
14
+ from vllm.sampling_params import SamplingParams
15
+ from vllm.sequence import MultiModalData
16
+ from vllm.usage.usage_lib import UsageContext
17
+ from vllm.utils import Counter
18
+
19
+ from autoregressive.serve.llm_engine import LLMEngine
20
+
21
+
22
+ class LLM:
23
+ """An LLM for generating texts from given prompts and sampling parameters.
24
+
25
+ This class includes a tokenizer, a language model (possibly distributed
26
+ across multiple GPUs), and GPU memory space allocated for intermediate
27
+ states (aka KV cache). Given a batch of prompts and sampling parameters,
28
+ this class generates texts from the model, using an intelligent batching
29
+ mechanism and efficient memory management.
30
+
31
+ NOTE: This class is intended to be used for offline inference. For online
32
+ serving, use the `AsyncLLMEngine` class instead.
33
+ NOTE: For the comprehensive list of arguments, see `EngineArgs`.
34
+
35
+ Args:
36
+ model: The name or path of a HuggingFace Transformers model.
37
+ tokenizer: The name or path of a HuggingFace Transformers tokenizer.
38
+ tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
39
+ if available, and "slow" will always use the slow tokenizer.
40
+ skip_tokenizer_init: If true, skip initialization of tokenizer and
41
+ detokenizer. Expect valid prompt_token_ids and None for prompt
42
+ from the input.
43
+ trust_remote_code: Trust remote code (e.g., from HuggingFace) when
44
+ downloading the model and tokenizer.
45
+ tensor_parallel_size: The number of GPUs to use for distributed
46
+ execution with tensor parallelism.
47
+ dtype: The data type for the model weights and activations. Currently,
48
+ we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
49
+ the `torch_dtype` attribute specified in the model config file.
50
+ However, if the `torch_dtype` in the config is `float32`, we will
51
+ use `float16` instead.
52
+ quantization: The method used to quantize the model weights. Currently,
53
+ we support "awq", "gptq", "squeezellm", and "fp8" (experimental).
54
+ If None, we first check the `quantization_config` attribute in the
55
+ model config file. If that is None, we assume the model weights are
56
+ not quantized and use `dtype` to determine the data type of
57
+ the weights.
58
+ revision: The specific model version to use. It can be a branch name,
59
+ a tag name, or a commit id.
60
+ tokenizer_revision: The specific tokenizer version to use. It can be a
61
+ branch name, a tag name, or a commit id.
62
+ seed: The seed to initialize the random number generator for sampling.
63
+ gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
64
+ reserve for the model weights, activations, and KV cache. Higher
65
+ values will increase the KV cache size and thus improve the model's
66
+ throughput. However, if the value is too high, it may cause out-of-
67
+ memory (OOM) errors.
68
+ swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
69
+ This can be used for temporarily storing the states of the requests
70
+ when their `best_of` sampling parameters are larger than 1. If all
71
+ requests will have `best_of=1`, you can safely set this to 0.
72
+ Otherwise, too small values may cause out-of-memory (OOM) errors.
73
+ enforce_eager: Whether to enforce eager execution. If True, we will
74
+ disable CUDA graph and always execute the model in eager mode.
75
+ If False, we will use CUDA graph and eager execution in hybrid.
76
+ max_context_len_to_capture: Maximum context len covered by CUDA graphs.
77
+ When a sequence has context length larger than this, we fall back
78
+ to eager mode.
79
+ disable_custom_all_reduce: See ParallelConfig
80
+ """
81
+
82
+ def __init__(
83
+ self,
84
+ args: argparse.Namespace,
85
+ model: str,
86
+ tokenizer: Optional[str] = None,
87
+ tokenizer_mode: str = "auto",
88
+ skip_tokenizer_init: bool = False,
89
+ trust_remote_code: bool = False,
90
+ tensor_parallel_size: int = 1,
91
+ dtype: str = "auto",
92
+ quantization: Optional[str] = None,
93
+ revision: Optional[str] = None,
94
+ tokenizer_revision: Optional[str] = None,
95
+ seed: int = 0,
96
+ gpu_memory_utilization: float = 0.9,
97
+ swap_space: int = 4,
98
+ enforce_eager: bool = False,
99
+ max_context_len_to_capture: int = 8192,
100
+ disable_custom_all_reduce: bool = False,
101
+ **kwargs,
102
+ ) -> None:
103
+ if "disable_log_stats" not in kwargs:
104
+ kwargs["disable_log_stats"] = True
105
+ engine_args = EngineArgs(
106
+ model=model,
107
+ tokenizer=tokenizer,
108
+ tokenizer_mode=tokenizer_mode,
109
+ skip_tokenizer_init=skip_tokenizer_init,
110
+ trust_remote_code=trust_remote_code,
111
+ tensor_parallel_size=tensor_parallel_size,
112
+ dtype=dtype,
113
+ quantization=quantization,
114
+ revision=revision,
115
+ tokenizer_revision=tokenizer_revision,
116
+ seed=seed,
117
+ gpu_memory_utilization=gpu_memory_utilization,
118
+ swap_space=swap_space,
119
+ enforce_eager=enforce_eager,
120
+ max_context_len_to_capture=max_context_len_to_capture,
121
+ disable_custom_all_reduce=disable_custom_all_reduce,
122
+ **kwargs,
123
+ )
124
+ self.llm_engine = LLMEngine.from_engine_args(
125
+ engine_args, usage_context=UsageContext.LLM_CLASS, args=args)
126
+ self.request_counter = Counter()
127
+
128
+ def get_tokenizer(
129
+ self) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:
130
+ return self.llm_engine.tokenizer.tokenizer
131
+
132
+ def set_tokenizer(
133
+ self,
134
+ tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
135
+ ) -> None:
136
+ self.llm_engine.tokenizer.tokenizer = tokenizer
137
+
138
+ def generate(
139
+ self,
140
+ prompts: Optional[Union[str, List[str]]] = None,
141
+ sampling_params: Optional[Union[SamplingParams,
142
+ List[SamplingParams]]] = None,
143
+ prompt_token_ids: Optional[List[List[int]]] = None,
144
+ use_tqdm: bool = True,
145
+ lora_request: Optional[LoRARequest] = None,
146
+ multi_modal_data: Optional[MultiModalData] = None,
147
+ ) -> List[RequestOutput]:
148
+ """Generates the completions for the input prompts.
149
+
150
+ NOTE: This class automatically batches the given prompts, considering
151
+ the memory constraint. For the best performance, put all of your prompts
152
+ into a single list and pass it to this method.
153
+
154
+ Args:
155
+ prompts: A list of prompts to generate completions for.
156
+ sampling_params: The sampling parameters for text generation. If
157
+ None, we use the default sampling parameters.
158
+ When it is a single value, it is applied to every prompt.
159
+ When it is a list, the list must have the same length as the
160
+ prompts and it is paired one by one with the prompt.
161
+ prompt_token_ids: A list of token IDs for the prompts. If None, we
162
+ use the tokenizer to convert the prompts to token IDs.
163
+ use_tqdm: Whether to use tqdm to display the progress bar.
164
+ lora_request: LoRA request to use for generation, if any.
165
+ multi_modal_data: Multi modal data.
166
+
167
+ Returns:
168
+ A list of `RequestOutput` objects containing the generated
169
+ completions in the same order as the input prompts.
170
+ """
171
+ if prompts is None and prompt_token_ids is None:
172
+ raise ValueError("Either prompts or prompt_token_ids must be "
173
+ "provided.")
174
+ if self.llm_engine.model_config.skip_tokenizer_init \
175
+ and prompts is not None:
176
+ raise ValueError("prompts must be None if skip_tokenizer_init "
177
+ "is True")
178
+ if isinstance(prompts, str):
179
+ # Convert a single prompt to a list.
180
+ prompts = [prompts]
181
+ if (prompts is not None and prompt_token_ids is not None
182
+ and len(prompts) != len(prompt_token_ids)):
183
+ raise ValueError("The lengths of prompts and prompt_token_ids "
184
+ "must be the same.")
185
+
186
+ if prompts is not None:
187
+ num_requests = len(prompts)
188
+ else:
189
+ assert prompt_token_ids is not None
190
+ num_requests = len(prompt_token_ids)
191
+
192
+ if sampling_params is None:
193
+ # Use default sampling params.
194
+ sampling_params = SamplingParams()
195
+
196
+ elif isinstance(sampling_params,
197
+ list) and len(sampling_params) != num_requests:
198
+ raise ValueError("The lengths of prompts and sampling_params "
199
+ "must be the same.")
200
+ if multi_modal_data:
201
+ multi_modal_data.data = multi_modal_data.data.to(torch.float16)
202
+
203
+ # Add requests to the engine.
204
+ for i in range(num_requests):
205
+ prompt = prompts[i] if prompts is not None else None
206
+ token_ids = None if prompt_token_ids is None else prompt_token_ids[i]
207
+ self._add_request(
208
+ prompt,
209
+ sampling_params[i]
210
+ if isinstance(sampling_params, list) else sampling_params,
211
+ token_ids,
212
+ lora_request=lora_request,
213
+ # Get ith image while maintaining the batch dim.
214
+ multi_modal_data=MultiModalData(
215
+ type=multi_modal_data.type,
216
+ data=multi_modal_data.data[i].unsqueeze(0))
217
+ if multi_modal_data else None,
218
+ )
219
+ return self._run_engine(use_tqdm)
220
+
221
+ def _add_request(
222
+ self,
223
+ prompt: Optional[str],
224
+ sampling_params: SamplingParams,
225
+ prompt_token_ids: Optional[List[int]],
226
+ lora_request: Optional[LoRARequest] = None,
227
+ multi_modal_data: Optional[MultiModalData] = None,
228
+ ) -> None:
229
+ request_id = str(next(self.request_counter))
230
+ self.llm_engine.add_request(request_id,
231
+ prompt,
232
+ sampling_params,
233
+ prompt_token_ids,
234
+ lora_request=lora_request,
235
+ multi_modal_data=multi_modal_data)
236
+
237
+
238
+ def _run_engine(self, use_tqdm: bool) -> List[RequestOutput]:
239
+ # Initialize tqdm.
240
+ if use_tqdm:
241
+ num_requests = self.llm_engine.get_num_unfinished_requests()
242
+ pbar = tqdm(
243
+ total=num_requests,
244
+ desc="Processed prompts",
245
+ dynamic_ncols=True,
246
+ postfix=f"Generation Speed: {0:.2f} toks/s",
247
+ )
+ total_toks = 0  # running count of generated tokens, used below for the speed postfix
248
+ # Run the engine.
249
+ outputs: List[RequestOutput] = []
250
+ while self.llm_engine.has_unfinished_requests():
251
+ step_outputs = self.llm_engine.step()
252
+ for output in step_outputs:
253
+ if output.finished:
254
+ outputs.append(output)
255
+ if use_tqdm:
256
+ total_toks += (sum(
257
+ len(stp.token_ids) for stp in output.outputs))
258
+ spd = total_toks / pbar.format_dict["elapsed"]
259
+ pbar.postfix = f"Generation Speed: {spd:.2f} toks/s"
260
+ pbar.update(1)
261
+ if use_tqdm:
262
+ pbar.close()
263
+ # Sort the outputs by request ID.
264
+ # This is necessary because some requests may be finished earlier than
265
+ # its previous requests.
266
+ outputs = sorted(outputs, key=lambda x: int(x.request_id))
267
+ return outputs
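
A hedged usage sketch for the wrapper above. The vLLM imports follow the upstream entrypoint this file is modified from; the model path and the contents of the extra `args` namespace are placeholders, since what they must contain is defined by the repo-local `autoregressive.serve.llm_engine.LLMEngine`, not by this file.

```python
# Sketch only: the model path and args contents are placeholders, not repo defaults.
import argparse

from vllm import SamplingParams

from autoregressive.serve.llm import LLM  # the wrapper defined above

args = argparse.Namespace()  # generation settings consumed by the custom LLMEngine
llm = LLM(args=args, model="path/to/hf_model", tensor_parallel_size=1, dtype="auto")

params = SamplingParams(temperature=1.0, top_p=1.0, top_k=1000, max_tokens=256)
outputs = llm.generate(prompts=["a cute dog"], sampling_params=params)
for out in outputs:
    print(out.request_id, len(out.outputs[0].token_ids))
```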
create_npz.py ADDED
@@ -0,0 +1,30 @@
1
+ from tqdm import tqdm
2
+ import os
3
+ from PIL import Image
4
+ import numpy as np
5
+ import argparse
6
+
7
+
8
+ def create_npz_from_sample_folder(sample_dir, num=50_000):
9
+ """
10
+ Builds a single .npz file from a folder of .png samples.
11
+ """
12
+ samples = []
13
+ for file in tqdm(os.listdir(sample_dir), desc="Building .npz file from samples"):
14
+ sample_pil = Image.open(f"{sample_dir}/{file}")
15
+ sample_np = np.asarray(sample_pil).astype(np.uint8)
16
+ samples.append(sample_np)
17
+ samples = np.stack(samples)
18
+ npz_path = f"{sample_dir}.npz"
19
+ np.savez(npz_path, arr_0=samples)
20
+ print(f"Saved .npz file to {npz_path} [shape={samples.shape}].")
21
+ return npz_path
22
+
23
+
24
+ if __name__ == "__main__":
25
+ parser = argparse.ArgumentParser()
26
+ parser.add_argument("--generated-images", type=str, required=True)
27
+ args = parser.parse_args()
28
+ num_fid_samples = 50000
29
+ create_npz_from_sample_folder(args.generated_images, num_fid_samples)
30
+ print("Done.")
hfd.sh ADDED
@@ -0,0 +1,328 @@
1
+ #!/usr/bin/env bash
2
+ # Color definitions
3
+ RED='\033[0;31m'; GREEN='\033[0;32m'; YELLOW='\033[1;33m'; NC='\033[0m' # No Color
4
+
5
+ trap 'printf "${YELLOW}\nDownload interrupted. You can resume by re-running the command.\n${NC}"; exit 1' INT
6
+
7
+ display_help() {
8
+ cat << EOF
9
+ Usage:
10
+ hfd <REPO_ID> [--include include_pattern1 include_pattern2 ...] [--exclude exclude_pattern1 exclude_pattern2 ...] [--hf_username username] [--hf_token token] [--tool aria2c|wget] [-x threads] [-j jobs] [--dataset] [--local-dir path] [--revision rev]
11
+
12
+ Description:
13
+ Downloads a model or dataset from Hugging Face using the provided repo ID.
14
+
15
+ Arguments:
16
+ REPO_ID The Hugging Face repo ID (Required)
17
+ Format: 'org_name/repo_name' or legacy format (e.g., gpt2)
18
+ Options:
19
+ include/exclude_pattern The patterns to match against file path, supports wildcard characters.
20
+ e.g., '--exclude *.safetensor *.md', '--include vae/*'.
21
+ --include (Optional) Patterns to include files for downloading (supports multiple patterns).
22
+ --exclude (Optional) Patterns to exclude files from downloading (supports multiple patterns).
23
+ --hf_username (Optional) Hugging Face username for authentication (not email).
24
+ --hf_token (Optional) Hugging Face token for authentication.
25
+ --tool (Optional) Download tool to use: aria2c (default) or wget.
26
+ -x (Optional) Number of download threads for aria2c (default: 4).
27
+ -j (Optional) Number of concurrent downloads for aria2c (default: 5).
28
+ --dataset (Optional) Flag to indicate downloading a dataset.
29
+ --local-dir (Optional) Directory path to store the downloaded data.
30
+ Defaults to the current directory with a subdirectory named 'repo_name'
31
+ if REPO_ID is composed of 'org_name/repo_name'.
32
+ --revision (Optional) Model/Dataset revision to download (default: main).
33
+
34
+ Example:
35
+ hfd gpt2
36
+ hfd bigscience/bloom-560m --exclude *.safetensors
37
+ hfd meta-llama/Llama-2-7b --hf_username myuser --hf_token mytoken -x 4
38
+ hfd lavita/medical-qa-shared-task-v1-toy --dataset
39
+ hfd bartowski/Phi-3.5-mini-instruct-exl2 --revision 5_0
40
+ EOF
41
+ exit 1
42
+ }
43
+
44
+ [[ -z "$1" || "$1" =~ ^-h || "$1" =~ ^--help ]] && display_help
45
+
46
+ REPO_ID=$1
47
+ shift
48
+
49
+ # Default values
50
+ TOOL="aria2c"
51
+ THREADS=4
52
+ CONCURRENT=5
53
+ HF_ENDPOINT=${HF_ENDPOINT:-"https://huggingface.co"}
54
+ INCLUDE_PATTERNS=()
55
+ EXCLUDE_PATTERNS=()
56
+ REVISION="main"
57
+
58
+ validate_number() {
59
+ [[ "$2" =~ ^[1-9][0-9]*$ && "$2" -le "$3" ]] || { printf "${RED}[Error] $1 must be 1-$3${NC}\n"; exit 1; }
60
+ }
61
+
62
+ # Argument parsing
63
+ while [[ $# -gt 0 ]]; do
64
+ case $1 in
65
+ --include) shift; while [[ $# -gt 0 && ! ($1 =~ ^--) && ! ($1 =~ ^-[^-]) ]]; do INCLUDE_PATTERNS+=("$1"); shift; done ;;
66
+ --exclude) shift; while [[ $# -gt 0 && ! ($1 =~ ^--) && ! ($1 =~ ^-[^-]) ]]; do EXCLUDE_PATTERNS+=("$1"); shift; done ;;
67
+ --hf_username) HF_USERNAME="$2"; shift 2 ;;
68
+ --hf_token) HF_TOKEN="$2"; shift 2 ;;
69
+ --tool)
70
+ case $2 in
71
+ aria2c|wget)
72
+ TOOL="$2"
73
+ ;;
74
+ *)
75
+ printf "%b[Error] Invalid tool. Use 'aria2c' or 'wget'.%b\n" "$RED" "$NC"
76
+ exit 1
77
+ ;;
78
+ esac
79
+ shift 2
80
+ ;;
81
+ -x) validate_number "threads (-x)" "$2" 10; THREADS="$2"; shift 2 ;;
82
+ -j) validate_number "concurrent downloads (-j)" "$2" 10; CONCURRENT="$2"; shift 2 ;;
83
+ --dataset) DATASET=1; shift ;;
84
+ --local-dir) LOCAL_DIR="$2"; shift 2 ;;
85
+ --revision) REVISION="$2"; shift 2 ;;
86
+ *) display_help ;;
87
+ esac
88
+ done
89
+
90
+ # Generate current command string
91
+ generate_command_string() {
92
+ local cmd_string="REPO_ID=$REPO_ID"
93
+ cmd_string+=" TOOL=$TOOL"
94
+ cmd_string+=" INCLUDE_PATTERNS=${INCLUDE_PATTERNS[*]}"
95
+ cmd_string+=" EXCLUDE_PATTERNS=${EXCLUDE_PATTERNS[*]}"
96
+ cmd_string+=" DATASET=${DATASET:-0}"
97
+ cmd_string+=" HF_USERNAME=${HF_USERNAME:-}"
98
+ cmd_string+=" HF_TOKEN=${HF_TOKEN:-}"
99
+ cmd_string+=" HF_TOKEN=${HF_ENDPOINT:-}"
100
+ cmd_string+=" REVISION=$REVISION"
101
+ echo "$cmd_string"
102
+ }
103
+
104
+ # Check if aria2, wget, curl are installed
105
+ check_command() {
106
+ if ! command -v $1 &>/dev/null; then
107
+ printf "%b%s is not installed. Please install it first.%b\n" "$RED" "$1" "$NC"
108
+ exit 1
109
+ fi
110
+ }
111
+
112
+ check_command curl; check_command "$TOOL"
113
+
114
+ LOCAL_DIR="${LOCAL_DIR:-${REPO_ID#*/}}"
115
+ mkdir -p "$LOCAL_DIR/.hfd"
116
+
117
+ if [[ "$DATASET" == 1 ]]; then
118
+ METADATA_API_PATH="datasets/$REPO_ID"
119
+ DOWNLOAD_API_PATH="datasets/$REPO_ID"
120
+ CUT_DIRS=5
121
+ else
122
+ METADATA_API_PATH="models/$REPO_ID"
123
+ DOWNLOAD_API_PATH="$REPO_ID"
124
+ CUT_DIRS=4
125
+ fi
126
+
127
+ # Modify API URL, construct based on revision
128
+ if [[ "$REVISION" != "main" ]]; then
129
+ METADATA_API_PATH="$METADATA_API_PATH/revision/$REVISION"
130
+ fi
131
+ API_URL="$HF_ENDPOINT/api/$METADATA_API_PATH"
132
+
133
+ METADATA_FILE="$LOCAL_DIR/.hfd/repo_metadata.json"
134
+
135
+ # Fetch and save metadata
136
+ fetch_and_save_metadata() {
137
+ status_code=$(curl -L -s -w "%{http_code}" -o "$METADATA_FILE" ${HF_TOKEN:+-H "Authorization: Bearer $HF_TOKEN"} "$API_URL")
138
+ RESPONSE=$(cat "$METADATA_FILE")
139
+ if [ "$status_code" -eq 200 ]; then
140
+ printf "%s\n" "$RESPONSE"
141
+ else
142
+ printf "%b[Error] Failed to fetch metadata from $API_URL. HTTP status code: $status_code.%b\n$RESPONSE\n" "${RED}" "${NC}" >&2
143
+ rm $METADATA_FILE
144
+ exit 1
145
+ fi
146
+ }
147
+
148
+ check_authentication() {
149
+ local response="$1"
150
+ if command -v jq &>/dev/null; then
151
+ local gated
152
+ gated=$(echo "$response" | jq -r '.gated // false')
153
+ if [[ "$gated" != "false" && ( -z "$HF_TOKEN" || -z "$HF_USERNAME" ) ]]; then
154
+ printf "${RED}The repository requires authentication, but --hf_username and --hf_token is not passed. Please get token from https://huggingface.co/settings/tokens.\nExiting.\n${NC}"
155
+ exit 1
156
+ fi
157
+ else
158
+ if echo "$response" | grep -q '"gated":[^f]' && [[ -z "$HF_TOKEN" || -z "$HF_USERNAME" ]]; then
159
+ printf "${RED}The repository requires authentication, but --hf_username and --hf_token is not passed. Please get token from https://huggingface.co/settings/tokens.\nExiting.\n${NC}"
160
+ exit 1
161
+ fi
162
+ fi
163
+ }
164
+
165
+ if [[ ! -f "$METADATA_FILE" ]]; then
166
+ printf "%bFetching repo metadata...%b\n" "$YELLOW" "$NC"
167
+ RESPONSE=$(fetch_and_save_metadata) || exit 1
168
+ check_authentication "$RESPONSE"
169
+ else
170
+ printf "%bUsing cached metadata: $METADATA_FILE%b\n" "$GREEN" "$NC"
171
+ RESPONSE=$(cat "$METADATA_FILE")
172
+ check_authentication "$RESPONSE"
173
+ fi
174
+
175
+ should_regenerate_filelist() {
176
+ local command_file="$LOCAL_DIR/.hfd/last_download_command"
177
+ local current_command=$(generate_command_string)
178
+
179
+ # If file list doesn't exist, regenerate
180
+ if [[ ! -f "$LOCAL_DIR/$fileslist_file" ]]; then
181
+ echo "$current_command" > "$command_file"
182
+ return 0
183
+ fi
184
+
185
+ # If command file doesn't exist, regenerate
186
+ if [[ ! -f "$command_file" ]]; then
187
+ echo "$current_command" > "$command_file"
188
+ return 0
189
+ fi
190
+
191
+ # Compare current command with saved command
192
+ local saved_command=$(cat "$command_file")
193
+ if [[ "$current_command" != "$saved_command" ]]; then
194
+ echo "$current_command" > "$command_file"
195
+ return 0
196
+ fi
197
+
198
+ return 1
199
+ }
200
+
201
+ fileslist_file=".hfd/${TOOL}_urls.txt"
202
+
203
+ if should_regenerate_filelist; then
204
+ # Remove existing file list if it exists
205
+ [[ -f "$LOCAL_DIR/$fileslist_file" ]] && rm "$LOCAL_DIR/$fileslist_file"
206
+
207
+ printf "%bGenerating file list...%b\n" "$YELLOW" "$NC"
208
+
209
+ # Convert include and exclude patterns to regex
210
+ INCLUDE_REGEX=""
211
+ EXCLUDE_REGEX=""
212
+ if ((${#INCLUDE_PATTERNS[@]})); then
213
+ INCLUDE_REGEX=$(printf '%s\n' "${INCLUDE_PATTERNS[@]}" | sed 's/\./\\./g; s/\*/.*/g' | paste -sd '|' -)
214
+ fi
215
+ if ((${#EXCLUDE_PATTERNS[@]})); then
216
+ EXCLUDE_REGEX=$(printf '%s\n' "${EXCLUDE_PATTERNS[@]}" | sed 's/\./\\./g; s/\*/.*/g' | paste -sd '|' -)
217
+ fi
218
+
219
+ # Check if jq is available
220
+ if command -v jq &>/dev/null; then
221
+ process_with_jq() {
222
+ if [[ "$TOOL" == "aria2c" ]]; then
223
+ printf "%s" "$RESPONSE" | jq -r \
224
+ --arg endpoint "$HF_ENDPOINT" \
225
+ --arg repo_id "$DOWNLOAD_API_PATH" \
226
+ --arg token "$HF_TOKEN" \
227
+ --arg include_regex "$INCLUDE_REGEX" \
228
+ --arg exclude_regex "$EXCLUDE_REGEX" \
229
+ --arg revision "$REVISION" \
230
+ '
231
+ .siblings[]
232
+ | select(
233
+ .rfilename != null
234
+ and ($include_regex == "" or (.rfilename | test($include_regex)))
235
+ and ($exclude_regex == "" or (.rfilename | test($exclude_regex) | not))
236
+ )
237
+ | [
238
+ ($endpoint + "/" + $repo_id + "/resolve/" + $revision + "/" + .rfilename),
239
+ " dir=" + (.rfilename | split("/")[:-1] | join("/")),
240
+ " out=" + (.rfilename | split("/")[-1]),
241
+ if $token != "" then " header=Authorization: Bearer " + $token else empty end,
242
+ ""
243
+ ]
244
+ | join("\n")
245
+ '
246
+ else
247
+ printf "%s" "$RESPONSE" | jq -r \
248
+ --arg endpoint "$HF_ENDPOINT" \
249
+ --arg repo_id "$DOWNLOAD_API_PATH" \
250
+ --arg include_regex "$INCLUDE_REGEX" \
251
+ --arg exclude_regex "$EXCLUDE_REGEX" \
252
+ --arg revision "$REVISION" \
253
+ '
254
+ .siblings[]
255
+ | select(
256
+ .rfilename != null
257
+ and ($include_regex == "" or (.rfilename | test($include_regex)))
258
+ and ($exclude_regex == "" or (.rfilename | test($exclude_regex) | not))
259
+ )
260
+ | ($endpoint + "/" + $repo_id + "/resolve/" + $revision + "/" + .rfilename)
261
+ '
262
+ fi
263
+ }
264
+ result=$(process_with_jq)
265
+ printf "%s\n" "$result" > "$LOCAL_DIR/$fileslist_file"
266
+ else
267
+ printf "%b[Warning] jq not installed, using grep/awk for metadata json parsing (slower). Consider installing jq for better parsing performance.%b\n" "$YELLOW" "$NC"
268
+ process_with_grep_awk() {
269
+ local include_pattern=""
270
+ local exclude_pattern=""
271
+ local output=""
272
+
273
+ if ((${#INCLUDE_PATTERNS[@]})); then
274
+ include_pattern=$(printf '%s\n' "${INCLUDE_PATTERNS[@]}" | sed 's/\./\\./g; s/\*/.*/g' | paste -sd '|' -)
275
+ fi
276
+ if ((${#EXCLUDE_PATTERNS[@]})); then
277
+ exclude_pattern=$(printf '%s\n' "${EXCLUDE_PATTERNS[@]}" | sed 's/\./\\./g; s/\*/.*/g' | paste -sd '|' -)
278
+ fi
279
+
280
+ local files=$(printf '%s' "$RESPONSE" | grep -o '"rfilename":"[^"]*"' | awk -F'"' '{print $4}')
281
+
282
+ if [[ -n "$include_pattern" ]]; then
283
+ files=$(printf '%s\n' "$files" | grep -E "$include_pattern")
284
+ fi
285
+ if [[ -n "$exclude_pattern" ]]; then
286
+ files=$(printf '%s\n' "$files" | grep -vE "$exclude_pattern")
287
+ fi
288
+
289
+ while IFS= read -r file; do
290
+ if [[ -n "$file" ]]; then
291
+ if [[ "$TOOL" == "aria2c" ]]; then
292
+ output+="$HF_ENDPOINT/$DOWNLOAD_API_PATH/resolve/$REVISION/$file"$'\n'
293
+ output+=" dir=$(dirname "$file")"$'\n'
294
+ output+=" out=$(basename "$file")"$'\n'
295
+ [[ -n "$HF_TOKEN" ]] && output+=" header=Authorization: Bearer $HF_TOKEN"$'\n'
296
+ output+=$'\n'
297
+ else
298
+ output+="$HF_ENDPOINT/$DOWNLOAD_API_PATH/resolve/$REVISION/$file"$'\n'
299
+ fi
300
+ fi
301
+ done <<< "$files"
302
+
303
+ printf '%s' "$output"
304
+ }
305
+
306
+ result=$(process_with_grep_awk)
307
+ printf "%s\n" "$result" > "$LOCAL_DIR/$fileslist_file"
308
+ fi
309
+ else
310
+ printf "%bResume from file list: $LOCAL_DIR/$fileslist_file%b\n" "$GREEN" "$NC"
311
+ fi
312
+
313
+ # Perform download
314
+ printf "${YELLOW}Starting download with $TOOL to $LOCAL_DIR...\n${NC}"
315
+
316
+ cd "$LOCAL_DIR"
317
+ if [[ "$TOOL" == "aria2c" ]]; then
318
+ aria2c --console-log-level=error --file-allocation=none -x "$THREADS" -j "$CONCURRENT" -s "$THREADS" -k 1M -c -i "$fileslist_file" --save-session="$fileslist_file"
319
+ elif [[ "$TOOL" == "wget" ]]; then
320
+ wget -x -nH --cut-dirs="$CUT_DIRS" ${HF_TOKEN:+--header="Authorization: Bearer $HF_TOKEN"} --input-file="$fileslist_file" --continue
321
+ fi
322
+
323
+ if [[ $? -eq 0 ]]; then
324
+ printf "${GREEN}Download completed successfully. Repo directory: $PWD\n${NC}"
325
+ else
326
+ printf "${RED}Download encountered errors.\n${NC}"
327
+ exit 1
328
+ fi
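
The script above ultimately resolves every `.siblings[].rfilename` entry from the metadata API into a direct download URL. A Python sketch of that URL construction (repo id, revision, and file name are placeholders; only the pattern comes from the jq/awk branches above):

```python
# Sketch of the download-URL pattern hfd.sh builds (see the jq/awk branches above).
endpoint = "https://huggingface.co"    # $HF_ENDPOINT
repo_id = "org_name/repo_name"         # REPO_ID (placeholder)
revision = "main"                      # --revision
is_dataset = False                     # --dataset flag

download_path = f"datasets/{repo_id}" if is_dataset else repo_id
rfilename = "vae/diffusion_pytorch_model.safetensors"   # one .siblings[].rfilename entry
url = f"{endpoint}/{download_path}/resolve/{revision}/{rfilename}"
print(url)
```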
requirements.txt ADDED
@@ -0,0 +1,22 @@
1
+ torchvision==0.16.2
2
+ opencv-python==4.9.0.80
3
+ matplotlib==3.9.0
4
+ numpy==1.26.4
5
+ einops
6
+ datasets
7
+ tensorflow==2.16.1
8
+ scikit-learn
9
+ scikit-image
10
+ ftfy
11
+ bs4
12
+ timm
13
+ torchmetrics
14
+ accelerate
15
+ controlnet_aux
16
+ ftfy
17
+ clean-fid
18
+ safetensors
19
+ transformers
20
+ tiktoken
21
+ sentencepiece
22
+ basicsr
scripts/tokenizer/train_vq_finetune.sh ADDED
@@ -0,0 +1,14 @@
1
+ #!/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=4 --node_rank=0 \
6
+ --master_addr=127.0.0.1 --master_port=12345 \
7
+ tokenizer/tokenizer_image/vq_train.py \
8
+ --finetune \
9
+ --disc-start 0 \
10
+ --vq-ckpt vq_ds16_c2i.pt \
11
+ --dataset imagenet_code \
12
+ --cloud-save-path output/cloud_disk \
13
+ "$@"
14
+
scripts/tokenizer/train_vq_finetune_continue.sh ADDED
@@ -0,0 +1,15 @@
1
+ #!/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=$nnodes --nproc_per_node=$nproc_per_node --node_rank=$node_rank \
6
+ --master_addr=$master_addr --master_port=$master_port \
7
+ tokenizer/tokenizer_image/vq_train.py \
8
+ --disc-start 0 \
9
+ --dataset t2i_image \
10
+ --data-path /path/to/high_aesthetic_10M \
11
+ --data-face-path /path/to/face_2M \
12
+ --cloud-save-path /path/to/cloud_disk \
13
+ "$@"
14
+
15
+ # --vq-ckpt xxx.pt
scripts/tokenizer/val.sh ADDED
@@ -0,0 +1,8 @@
1
+ #!/bin/bash
2
+ set -x
3
+
4
+ torchrun \
5
+ --nnodes=1 --nproc_per_node=4 --node_rank=0 \
6
+ --master_port=12343 \
7
+ tokenizer/validation/val_ddp.py \
8
+ "$@"
test_dataset_t2icontrol.py ADDED
@@ -0,0 +1,55 @@
1
+
2
+ import os
3
+ import argparse
4
+ import torch
5
+ from torchvision import transforms
6
+ from torch.utils.data import DataLoader
7
+ from PIL import Image
8
+ from dataset.t2i_control import T2IControlCode
9
+ from tqdm import tqdm
10
+
11
+
12
+
13
+
14
+ def get_args():
15
+ parser = argparse.ArgumentParser()
16
+ parser.add_argument('--code_path', type=str, required=True, help='Root directory containing the code/control/image/caption_emb folders')
17
+ parser.add_argument('--code_path2', type=str, default=None, help='Path to a second set of data (optional)')
18
+ parser.add_argument('--image_size', type=int, default=512)
19
+ parser.add_argument('--downsample_size', type=int, default=8)
20
+ parser.add_argument('--condition_type', type=str, default='seg', choices=['seg', 'depth', 'canny', 'hed', 'lineart'], help='Type of control condition')
21
+ parser.add_argument('--get_image', action='store_true', help='Whether to return the image')
22
+ parser.add_argument('--get_prompt', action='store_true', help='Whether to return the prompt')
23
+ parser.add_argument('--get_label', action='store_true', help='Whether to return the label')
24
+ parser.add_argument('--max_show', type=int, default=5, help='Maximum number of samples to display')
25
+ return parser.parse_args()
26
+
27
+ def main():
28
+ args = get_args()
29
+
30
+ dataset = T2IControlCode(args)
31
+ print(f"\n📦 数据集大小: {len(dataset)}")
32
+
33
+ loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=2, collate_fn=dataset.collate_fn)
34
+
35
+ for i, batch in enumerate(tqdm(loader)):
36
+ print(f"\n🟡 Sample #{i}")
37
+ print(f" - code shape: {batch['code'].shape}")
38
+ print(f" - control shape: {batch['control'].shape}")
39
+ print(f" - caption_emb shape: {batch['caption_emb'].shape}")
40
+ print(f" - attention mask shape: {batch['attn_mask'].shape}")
41
+ print(f" - valid: {batch['valid'].item()}")
42
+
43
+ if args.get_image:
44
+ print(f" - image shape: {batch['image'].shape}")
45
+ if args.get_prompt:
46
+ print(f" - prompt: {batch['prompt']}")
47
+ if args.get_label:
48
+ print(f" - label shape: {batch['label'].shape}")
49
+
50
+ if i + 1 >= args.max_show:
51
+ break
52
+
53
+ if __name__ == "__main__":
54
+ main()
55
+
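
A hedged sketch of driving the dataset without argparse; the attribute names mirror the parser dests above, and the paths are placeholders. Which of these attributes `T2IControlCode` actually reads is defined in `dataset/t2i_control.py`, not here.

```python
# Construct the dataset directly; paths are placeholders.
from types import SimpleNamespace

from torch.utils.data import DataLoader

from dataset.t2i_control import T2IControlCode

args = SimpleNamespace(
    code_path="/path/to/data_root", code_path2=None,
    image_size=512, downsample_size=8, condition_type="canny",
    get_image=False, get_prompt=True, get_label=False, max_show=5,
)
dataset = T2IControlCode(args)
loader = DataLoader(dataset, batch_size=1, collate_fn=dataset.collate_fn)
print(len(dataset))
```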
tokenizer/consistencydecoder/README.md ADDED
@@ -0,0 +1,14 @@
1
+ ## Consistency Decoder from OpenAI
2
+
3
+ ### install
4
+ ```
5
+ pip install diffusers
6
+ pip install accelerate
7
+ ```
8
+
9
+ ### demo
10
+ ```
11
+ cd ${THIS_REPO_ROOT}
12
+ python3 tokenizer/consistencydecoder/cd_demo.py
13
+ ```
14
+
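
A minimal round-trip sketch with the consistency decoder, using the same `diffusers` calls as `reconstruction_cd_ddp.py` below; the random tensor stands in for a real image scaled to `[-1, 1]`.

```python
# Encode/decode round trip with the OpenAI consistency decoder (sketch).
import torch
from diffusers.models import ConsistencyDecoderVAE

device = "cuda"
vae = ConsistencyDecoderVAE.from_pretrained(
    "openai/consistency-decoder", torch_dtype=torch.float16
).to(device)

x = torch.rand(1, 3, 256, 256, device=device, dtype=torch.float16) * 2 - 1  # [-1, 1] "image"
with torch.no_grad():
    latent = vae.encode(x).latent_dist.sample().mul_(0.18215)
    recon = vae.decode(latent / 0.18215).sample                              # back to [-1, 1]
print(recon.shape)
```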
tokenizer/consistencydecoder/reconstruction_cd_ddp.py ADDED
@@ -0,0 +1,208 @@
1
+ import torch
2
+ torch.backends.cuda.matmul.allow_tf32 = True
3
+ torch.backends.cudnn.allow_tf32 = True
4
+ import torch.distributed as dist
5
+ from torch.utils.data import Dataset, DataLoader
6
+ from torch.utils.data.distributed import DistributedSampler
7
+ from torchvision.datasets import ImageFolder
8
+ from torchvision import transforms
9
+ from tqdm import tqdm
10
+ import os
11
+ import itertools
12
+ from PIL import Image
13
+ import numpy as np
14
+ import argparse
15
+ import random
16
+
17
+ from skimage.metrics import peak_signal_noise_ratio as psnr_loss
18
+ from skimage.metrics import structural_similarity as ssim_loss
19
+ from diffusers.models import ConsistencyDecoderVAE
20
+
21
+
22
+ class SingleFolderDataset(Dataset):
23
+ def __init__(self, directory, transform=None):
24
+ super().__init__()
25
+ self.directory = directory
26
+ self.transform = transform
27
+ self.image_paths = [os.path.join(directory, file_name) for file_name in os.listdir(directory)
28
+ if os.path.isfile(os.path.join(directory, file_name))]
29
+
30
+ def __len__(self):
31
+ return len(self.image_paths)
32
+
33
+ def __getitem__(self, idx):
34
+ image_path = self.image_paths[idx]
35
+ image = Image.open(image_path).convert('RGB')
36
+ if self.transform:
37
+ image = self.transform(image)
38
+ return image, torch.tensor(0)
39
+
40
+
41
+ def create_npz_from_sample_folder(sample_dir, num=50_000):
42
+ """
43
+ Builds a single .npz file from a folder of .png samples.
44
+ """
45
+ samples = []
46
+ for i in tqdm(range(num), desc="Building .npz file from samples"):
47
+ sample_pil = Image.open(f"{sample_dir}/{i:06d}.png")
48
+ sample_np = np.asarray(sample_pil).astype(np.uint8)
49
+ samples.append(sample_np)
50
+
51
+ random.shuffle(samples) # This is very important for IS(Inception Score) !!!
52
+ samples = np.stack(samples)
53
+ assert samples.shape == (num, samples.shape[1], samples.shape[2], 3)
54
+ npz_path = f"{sample_dir}.npz"
55
+ np.savez(npz_path, arr_0=samples)
56
+ print(f"Saved .npz file to {npz_path} [shape={samples.shape}].")
57
+ return npz_path
58
+
59
+
60
+ def center_crop_arr(pil_image, image_size):
61
+ """
62
+ Center cropping implementation from ADM.
63
+ https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
64
+ """
65
+ while min(*pil_image.size) >= 2 * image_size:
66
+ pil_image = pil_image.resize(
67
+ tuple(x // 2 for x in pil_image.size), resample=Image.BOX
68
+ )
69
+
70
+ scale = image_size / min(*pil_image.size)
71
+ pil_image = pil_image.resize(
72
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
73
+ )
74
+
75
+ arr = np.array(pil_image)
76
+ crop_y = (arr.shape[0] - image_size) // 2
77
+ crop_x = (arr.shape[1] - image_size) // 2
78
+ return Image.fromarray(arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size])
79
+
80
+
81
+ def main(args):
82
+ # Setup PyTorch:
83
+ assert torch.cuda.is_available(), "Reconstruction with DDP requires at least one GPU"
84
+ torch.set_grad_enabled(False)
85
+
86
+ # Setup env
87
+ dist.init_process_group("nccl")
88
+ rank = dist.get_rank()
89
+ device = rank % torch.cuda.device_count()
90
+ seed = args.global_seed * dist.get_world_size() + rank
91
+ torch.manual_seed(seed)
92
+ torch.cuda.set_device(device)
93
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
94
+
95
+ # create and load model
96
+ vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16).to("cuda:{}".format(device))
97
+
98
+ # Create folder to save samples:
99
+ folder_name = f"openai-consistencydecoder-{args.dataset}-size-{args.image_size}-seed-{args.global_seed}"
100
+ sample_folder_dir = f"{args.sample_dir}/{folder_name}"
101
+ if rank == 0:
102
+ os.makedirs(sample_folder_dir, exist_ok=True)
103
+ print(f"Saving .png samples at {sample_folder_dir}")
104
+ dist.barrier()
105
+
106
+ # Setup data:
107
+ transform = transforms.Compose([
108
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, args.image_size)),
109
+ transforms.ToTensor(),
110
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
111
+ ])
112
+ if args.dataset == 'imagenet':
113
+ dataset = ImageFolder(args.data_path, transform=transform)
114
+ num_fid_samples = 50000
115
+ elif args.dataset == 'coco':
116
+ dataset = SingleFolderDataset(args.data_path, transform=transform)
117
+ num_fid_samples = 5000
118
+ else:
119
+ raise Exception("please check dataset")
120
+ sampler = DistributedSampler(
121
+ dataset,
122
+ num_replicas=dist.get_world_size(),
123
+ rank=rank,
124
+ shuffle=False,
125
+ seed=args.global_seed
126
+ )
127
+ loader = DataLoader(
128
+ dataset,
129
+ batch_size=args.per_proc_batch_size,
130
+ shuffle=False,
131
+ sampler=sampler,
132
+ num_workers=args.num_workers,
133
+ pin_memory=True,
134
+ drop_last=False
135
+ )
136
+
137
+ # Figure out how many samples we need to generate on each GPU and how many iterations we need to run:
138
+ n = args.per_proc_batch_size
139
+ global_batch_size = n * dist.get_world_size()
140
+ psnr_val_rgb = []
141
+ ssim_val_rgb = []
142
+
143
+ loader = tqdm(loader) if rank == 0 else loader
144
+ total = 0
145
+ for x, _ in loader:
146
+ rgb_gts = x
147
+ rgb_gts = (rgb_gts.permute(0, 2, 3, 1).to("cpu").numpy() + 1.0) / 2.0 # rgb_gt value is between [0, 1]
148
+ x = x.half().to("cuda:{}".format(device))
149
+ with torch.no_grad():
150
+ # Map input images to latent space + normalize latents:
151
+ latent = vae.encode(x).latent_dist.sample().mul_(0.18215)
152
+ # reconstruct:
153
+ samples = vae.decode(latent / 0.18215).sample # output value is between [-1, 1]
154
+ samples = torch.clamp(127.5 * samples + 128.0, 0, 255).permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy()
155
+
156
+ # Save samples to disk as individual .png files
157
+ for i, (sample, rgb_gt) in enumerate(zip(samples, rgb_gts)):
158
+ index = i * dist.get_world_size() + rank + total
159
+ Image.fromarray(sample).save(f"{sample_folder_dir}/{index:06d}.png")
160
+ # metric
161
+ rgb_restored = sample.astype(np.float32) / 255. # rgb_restored value is between [0, 1]
162
+ psnr = psnr_loss(rgb_restored, rgb_gt)
163
+ ssim = ssim_loss(rgb_restored, rgb_gt, multichannel=True, data_range=2.0, channel_axis=-1)
164
+ psnr_val_rgb.append(psnr)
165
+ ssim_val_rgb.append(ssim)
166
+ total += global_batch_size
167
+
168
+ # ------------------------------------
169
+ # Summary
170
+ # ------------------------------------
171
+ # Make sure all processes have finished saving their samples
172
+ dist.barrier()
173
+ world_size = dist.get_world_size()
174
+ gather_psnr_val = [None for _ in range(world_size)]
175
+ gather_ssim_val = [None for _ in range(world_size)]
176
+ dist.all_gather_object(gather_psnr_val, psnr_val_rgb)
177
+ dist.all_gather_object(gather_ssim_val, ssim_val_rgb)
178
+
179
+ if rank == 0:
180
+ gather_psnr_val = list(itertools.chain(*gather_psnr_val))
181
+ gather_ssim_val = list(itertools.chain(*gather_ssim_val))
182
+ psnr_val_rgb = sum(gather_psnr_val) / len(gather_psnr_val)
183
+ ssim_val_rgb = sum(gather_ssim_val) / len(gather_ssim_val)
184
+ print("PSNR: %f, SSIM: %f " % (psnr_val_rgb, ssim_val_rgb))
185
+
186
+ result_file = f"{sample_folder_dir}_results.txt"
187
+ print("writing results to {}".format(result_file))
188
+ with open(result_file, 'w') as f:
189
+ print("PSNR: %f, SSIM: %f " % (psnr_val_rgb, ssim_val_rgb), file=f)
190
+
191
+ create_npz_from_sample_folder(sample_folder_dir, num_fid_samples)
192
+ print("Done.")
193
+
194
+ dist.barrier()
195
+ dist.destroy_process_group()
196
+
197
+
198
+ if __name__ == "__main__":
199
+ parser = argparse.ArgumentParser()
200
+ parser.add_argument("--data-path", type=str, required=True)
201
+ parser.add_argument("--dataset", type=str, choices=['imagenet', 'coco'], default='imagenet')
202
+ parser.add_argument("--image-size", type=int, choices=[256, 512], default=256)
203
+ parser.add_argument("--sample-dir", type=str, default="reconstructions")
204
+ parser.add_argument("--per-proc-batch-size", type=int, default=32)
205
+ parser.add_argument("--global-seed", type=int, default=0)
206
+ parser.add_argument("--num-workers", type=int, default=4)
207
+ args = parser.parse_args()
208
+ main(args)
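
A single-image version of the metric path above (center crop, VAE round trip, PSNR/SSIM), assuming the file is importable as a module from the repo root; the image path is a placeholder, and since both arrays here stay in `[0, 1]`, `data_range=1.0` is passed explicitly.

```python
# Single-image sketch of the reconstruction metrics computed at scale above.
import numpy as np
import torch
from PIL import Image
from skimage.metrics import peak_signal_noise_ratio as psnr_loss
from skimage.metrics import structural_similarity as ssim_loss
from diffusers.models import ConsistencyDecoderVAE

from tokenizer.consistencydecoder.reconstruction_cd_ddp import center_crop_arr

vae = ConsistencyDecoderVAE.from_pretrained(
    "openai/consistency-decoder", torch_dtype=torch.float16
).to("cuda")

img = center_crop_arr(Image.open("example.png").convert("RGB"), 256)  # placeholder path
gt = np.asarray(img).astype(np.float32) / 255.0                       # HWC in [0, 1]
x = torch.from_numpy(gt).permute(2, 0, 1).unsqueeze(0) * 2 - 1        # NCHW in [-1, 1]
with torch.no_grad():
    latent = vae.encode(x.half().to("cuda")).latent_dist.sample().mul_(0.18215)
    rec = vae.decode(latent / 0.18215).sample.clamp(-1, 1)
rec = (rec[0].permute(1, 2, 0).float().cpu().numpy() + 1.0) / 2.0     # back to [0, 1]
print(psnr_loss(gt, rec, data_range=1.0),
      ssim_loss(gt, rec, channel_axis=-1, data_range=1.0))
```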
tokenizer/tokenizer_image/discriminator.py ADDED
@@ -0,0 +1,255 @@
1
+ # Modified from:
2
+ # taming-transformers: https://github.com/CompVis/taming-transformers
3
+ # stylegan2-pytorch: https://github.com/rosinality/stylegan2-pytorch/blob/master/model.py
4
+ # maskgit: https://github.com/google-research/maskgit/blob/main/maskgit/nets/discriminator.py
5
+ import functools
6
+ import math
7
+ import torch
8
+ import torch.nn as nn
9
+ try:
10
+ from kornia.filters import filter2d
11
+ except ImportError:  # kornia is optional; only the StyleGAN discriminator's Blur needs filter2d
12
+ pass
13
+
14
+ #################################################################################
15
+ # PatchGAN #
16
+ #################################################################################
17
+ class PatchGANDiscriminator(nn.Module):
18
+ """Defines a PatchGAN discriminator as in Pix2Pix
19
+ --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
20
+ """
21
+ def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
22
+ """Construct a PatchGAN discriminator
23
+ Parameters:
24
+ input_nc (int) -- the number of channels in input images
25
+ ndf (int) -- the number of filters in the last conv layer
26
+ n_layers (int) -- the number of conv layers in the discriminator
27
+ norm_layer -- normalization layer
28
+ """
29
+ super(PatchGANDiscriminator, self).__init__()
30
+ if not use_actnorm:
31
+ norm_layer = nn.BatchNorm2d
32
+ else:
33
+ norm_layer = ActNorm
34
+ if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
35
+ use_bias = norm_layer.func != nn.BatchNorm2d
36
+ else:
37
+ use_bias = norm_layer != nn.BatchNorm2d
38
+
39
+ kw = 4
40
+ padw = 1
41
+ sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
42
+ nf_mult = 1
43
+ nf_mult_prev = 1
44
+ for n in range(1, n_layers): # gradually increase the number of filters
45
+ nf_mult_prev = nf_mult
46
+ nf_mult = min(2 ** n, 8)
47
+ sequence += [
48
+ nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
49
+ norm_layer(ndf * nf_mult),
50
+ nn.LeakyReLU(0.2, True)
51
+ ]
52
+
53
+ nf_mult_prev = nf_mult
54
+ nf_mult = min(2 ** n_layers, 8)
55
+ sequence += [
56
+ nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
57
+ norm_layer(ndf * nf_mult),
58
+ nn.LeakyReLU(0.2, True)
59
+ ]
60
+
61
+ sequence += [
62
+ nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
63
+ self.main = nn.Sequential(*sequence)
64
+
65
+ self.apply(self._init_weights)
66
+
67
+ def _init_weights(self, module):
68
+ if isinstance(module, nn.Conv2d):
69
+ nn.init.normal_(module.weight.data, 0.0, 0.02)
70
+ elif isinstance(module, nn.BatchNorm2d):
71
+ nn.init.normal_(module.weight.data, 1.0, 0.02)
72
+ nn.init.constant_(module.bias.data, 0)
73
+
74
+ def forward(self, input):
75
+ """Standard forward."""
76
+ return self.main(input)
77
+
78
+
79
+ class ActNorm(nn.Module):
80
+ def __init__(self, num_features, logdet=False, affine=True,
81
+ allow_reverse_init=False):
82
+ assert affine
83
+ super().__init__()
84
+ self.logdet = logdet
85
+ self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
86
+ self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
87
+ self.allow_reverse_init = allow_reverse_init
88
+
89
+ self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
90
+
91
+ def initialize(self, input):
92
+ with torch.no_grad():
93
+ flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
94
+ mean = (
95
+ flatten.mean(1)
96
+ .unsqueeze(1)
97
+ .unsqueeze(2)
98
+ .unsqueeze(3)
99
+ .permute(1, 0, 2, 3)
100
+ )
101
+ std = (
102
+ flatten.std(1)
103
+ .unsqueeze(1)
104
+ .unsqueeze(2)
105
+ .unsqueeze(3)
106
+ .permute(1, 0, 2, 3)
107
+ )
108
+
109
+ self.loc.data.copy_(-mean)
110
+ self.scale.data.copy_(1 / (std + 1e-6))
111
+
112
+ def forward(self, input, reverse=False):
113
+ if reverse:
114
+ return self.reverse(input)
115
+ if len(input.shape) == 2:
116
+ input = input[:,:,None,None]
117
+ squeeze = True
118
+ else:
119
+ squeeze = False
120
+
121
+ _, _, height, width = input.shape
122
+
123
+ if self.training and self.initialized.item() == 0:
124
+ self.initialize(input)
125
+ self.initialized.fill_(1)
126
+
127
+ h = self.scale * (input + self.loc)
128
+
129
+ if squeeze:
130
+ h = h.squeeze(-1).squeeze(-1)
131
+
132
+ if self.logdet:
133
+ log_abs = torch.log(torch.abs(self.scale))
134
+ logdet = height*width*torch.sum(log_abs)
135
+ logdet = logdet * torch.ones(input.shape[0]).to(input)
136
+ return h, logdet
137
+
138
+ return h
139
+
140
+ def reverse(self, output):
141
+ if self.training and self.initialized.item() == 0:
142
+ if not self.allow_reverse_init:
143
+ raise RuntimeError(
144
+ "Initializing ActNorm in reverse direction is "
145
+ "disabled by default. Use allow_reverse_init=True to enable."
146
+ )
147
+ else:
148
+ self.initialize(output)
149
+ self.initialized.fill_(1)
150
+
151
+ if len(output.shape) == 2:
152
+ output = output[:,:,None,None]
153
+ squeeze = True
154
+ else:
155
+ squeeze = False
156
+
157
+ h = output / self.scale - self.loc
158
+
159
+ if squeeze:
160
+ h = h.squeeze(-1).squeeze(-1)
161
+ return h
162
+
163
+
164
+
165
+ #################################################################################
166
+ # StyleGAN #
167
+ #################################################################################
168
+ class StyleGANDiscriminator(nn.Module):
169
+ def __init__(self, input_nc=3, ndf=64, n_layers=3, channel_multiplier=1, image_size=256):
170
+ super().__init__()
171
+ channels = {
172
+ 4: 512,
173
+ 8: 512,
174
+ 16: 512,
175
+ 32: 512,
176
+ 64: 256 * channel_multiplier,
177
+ 128: 128 * channel_multiplier,
178
+ 256: 64 * channel_multiplier,
179
+ 512: 32 * channel_multiplier,
180
+ 1024: 16 * channel_multiplier,
181
+ }
182
+
183
+ log_size = int(math.log(image_size, 2))
184
+ in_channel = channels[image_size]
185
+
186
+ blocks = [nn.Conv2d(input_nc, in_channel, 3, padding=1), leaky_relu()]
187
+ for i in range(log_size, 2, -1):
188
+ out_channel = channels[2 ** (i - 1)]
189
+ blocks.append(DiscriminatorBlock(in_channel, out_channel))
190
+ in_channel = out_channel
191
+ self.blocks = nn.ModuleList(blocks)
192
+
193
+ self.final_conv = nn.Sequential(
194
+ nn.Conv2d(in_channel, channels[4], 3, padding=1),
195
+ leaky_relu(),
196
+ )
197
+ self.final_linear = nn.Sequential(
198
+ nn.Linear(channels[4] * 4 * 4, channels[4]),
199
+ leaky_relu(),
200
+ nn.Linear(channels[4], 1)
201
+ )
202
+
203
+ def forward(self, x):
204
+ for block in self.blocks:
205
+ x = block(x)
206
+ x = self.final_conv(x)
207
+ x = x.view(x.shape[0], -1)
208
+ x = self.final_linear(x)
209
+ return x
210
+
211
+
212
+ class DiscriminatorBlock(nn.Module):
213
+ def __init__(self, input_channels, filters, downsample=True):
214
+ super().__init__()
215
+ self.conv_res = nn.Conv2d(input_channels, filters, 1, stride = (2 if downsample else 1))
216
+
217
+ self.net = nn.Sequential(
218
+ nn.Conv2d(input_channels, filters, 3, padding=1),
219
+ leaky_relu(),
220
+ nn.Conv2d(filters, filters, 3, padding=1),
221
+ leaky_relu()
222
+ )
223
+
224
+ self.downsample = nn.Sequential(
225
+ Blur(),
226
+ nn.Conv2d(filters, filters, 3, padding = 1, stride = 2)
227
+ ) if downsample else None
228
+
229
+ def forward(self, x):
230
+ res = self.conv_res(x)
231
+ x = self.net(x)
232
+ if exists(self.downsample):
233
+ x = self.downsample(x)
234
+ x = (x + res) * (1 / math.sqrt(2))
235
+ return x
236
+
237
+
238
+ class Blur(nn.Module):
239
+ def __init__(self):
240
+ super().__init__()
241
+ f = torch.Tensor([1, 2, 1])
242
+ self.register_buffer('f', f)
243
+
244
+ def forward(self, x):
245
+ f = self.f
246
+ f = f[None, None, :] * f [None, :, None]
247
+ return filter2d(x, f, normalized=True)
248
+
249
+
250
+ def leaky_relu(p=0.2):
251
+ return nn.LeakyReLU(p, inplace=True)
252
+
253
+
254
+ def exists(val):
255
+ return val is not None
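
For orientation, a shape sketch for the two discriminators above (kornia is needed at forward time for the StyleGAN blur filter). With the defaults, the PatchGAN head returns a grid of per-patch logits, while the StyleGAN head returns a single logit per image.

```python
# Shape sanity check for the discriminators defined above (sketch; requires kornia
# for the StyleGAN branch).
import torch

from tokenizer.tokenizer_image.discriminator import (
    PatchGANDiscriminator, StyleGANDiscriminator,
)

x = torch.randn(2, 3, 256, 256)

patch_d = PatchGANDiscriminator(input_nc=3, ndf=64, n_layers=3)
print(patch_d(x).shape)   # torch.Size([2, 1, 30, 30]) -- one logit per ~70x70 patch

style_d = StyleGANDiscriminator(input_nc=3, image_size=256)
print(style_d(x).shape)   # torch.Size([2, 1]) -- a single logit per image
```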
tokenizer/tokenizer_image/discriminator_patchgan.py ADDED
@@ -0,0 +1,152 @@
+ # Modified from:
+ # taming-transformers: https://github.com/CompVis/taming-transformers
+ import functools
+ import torch
+ import torch.nn as nn
+
+
+ class NLayerDiscriminator(nn.Module):
+     """Defines a PatchGAN discriminator as in Pix2Pix
+     --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
+     """
+     def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
+         """Construct a PatchGAN discriminator
+         Parameters:
+             input_nc (int) -- the number of channels in input images
+             ndf (int)      -- the number of filters in the last conv layer
+             n_layers (int) -- the number of conv layers in the discriminator
+             norm_layer     -- normalization layer
+         """
+         super(NLayerDiscriminator, self).__init__()
+         if not use_actnorm:
+             norm_layer = nn.BatchNorm2d
+         else:
+             norm_layer = ActNorm
+         if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
+             use_bias = norm_layer.func != nn.BatchNorm2d
+         else:
+             use_bias = norm_layer != nn.BatchNorm2d
+
+         kw = 4
+         padw = 1
+         sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
+         nf_mult = 1
+         nf_mult_prev = 1
+         for n in range(1, n_layers):  # gradually increase the number of filters
+             nf_mult_prev = nf_mult
+             nf_mult = min(2 ** n, 8)
+             sequence += [
+                 nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
+                 norm_layer(ndf * nf_mult),
+                 nn.LeakyReLU(0.2, True)
+             ]
+
+         nf_mult_prev = nf_mult
+         nf_mult = min(2 ** n_layers, 8)
+         sequence += [
+             nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
+             norm_layer(ndf * nf_mult),
+             nn.LeakyReLU(0.2, True)
+         ]
+
+         sequence += [
+             nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]  # output 1 channel prediction map
+         self.main = nn.Sequential(*sequence)
+
+         self.apply(self._init_weights)
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Conv2d):
+             nn.init.normal_(module.weight.data, 0.0, 0.02)
+         elif isinstance(module, nn.BatchNorm2d):
+             nn.init.normal_(module.weight.data, 1.0, 0.02)
+             nn.init.constant_(module.bias.data, 0)
+
+     def forward(self, input):
+         """Standard forward."""
+         return self.main(input)
+
+
+ class ActNorm(nn.Module):
+     def __init__(self, num_features, logdet=False, affine=True,
+                  allow_reverse_init=False):
+         assert affine
+         super().__init__()
+         self.logdet = logdet
+         self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
+         self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
+         self.allow_reverse_init = allow_reverse_init
+
+         self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
+
+     def initialize(self, input):
+         with torch.no_grad():
+             flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
+             mean = (
+                 flatten.mean(1)
+                 .unsqueeze(1)
+                 .unsqueeze(2)
+                 .unsqueeze(3)
+                 .permute(1, 0, 2, 3)
+             )
+             std = (
+                 flatten.std(1)
+                 .unsqueeze(1)
+                 .unsqueeze(2)
+                 .unsqueeze(3)
+                 .permute(1, 0, 2, 3)
+             )
+
+             self.loc.data.copy_(-mean)
+             self.scale.data.copy_(1 / (std + 1e-6))
+
+     def forward(self, input, reverse=False):
+         if reverse:
+             return self.reverse(input)
+         if len(input.shape) == 2:
+             input = input[:, :, None, None]
+             squeeze = True
+         else:
+             squeeze = False
+
+         _, _, height, width = input.shape
+
+         if self.training and self.initialized.item() == 0:
+             self.initialize(input)
+             self.initialized.fill_(1)
+
+         h = self.scale * (input + self.loc)
+
+         if squeeze:
+             h = h.squeeze(-1).squeeze(-1)
+
+         if self.logdet:
+             log_abs = torch.log(torch.abs(self.scale))
+             logdet = height * width * torch.sum(log_abs)
+             logdet = logdet * torch.ones(input.shape[0]).to(input)
+             return h, logdet
+
+         return h
+
+     def reverse(self, output):
+         if self.training and self.initialized.item() == 0:
+             if not self.allow_reverse_init:
+                 raise RuntimeError(
+                     "Initializing ActNorm in reverse direction is "
+                     "disabled by default. Use allow_reverse_init=True to enable."
+                 )
+             else:
+                 self.initialize(output)
+                 self.initialized.fill_(1)
+
+         if len(output.shape) == 2:
+             output = output[:, :, None, None]
+             squeeze = True
+         else:
+             squeeze = False
+
+         h = output / self.scale - self.loc
+
+         if squeeze:
+             h = h.squeeze(-1).squeeze(-1)
+         return h
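For reference, a minimal shape check for the PatchGAN discriminator above (an illustrative sketch, not part of the uploaded file; it assumes the repo root is on `PYTHONPATH`, the same import path `vq_loss.py` uses). With the defaults (`ndf=64`, `n_layers=3`), a 256x256 input yields a 30x30 map of per-patch logits rather than a single scalar:

```python
# Shape check for NLayerDiscriminator (illustrative sketch).
import torch
from tokenizer.tokenizer_image.discriminator_patchgan import NLayerDiscriminator

disc = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
x = torch.randn(2, 3, 256, 256)   # stand-in batch of images in [-1, 1]
logits = disc(x)                  # one logit per overlapping image patch
print(logits.shape)               # torch.Size([2, 1, 30, 30])
```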
tokenizer/tokenizer_image/discriminator_stylegan.py ADDED
@@ -0,0 +1,101 @@
+ # Modified from:
+ # stylegan2-pytorch: https://github.com/lucidrains/stylegan2-pytorch/blob/master/stylegan2_pytorch/stylegan2_pytorch.py
+ # stylegan2-pytorch: https://github.com/rosinality/stylegan2-pytorch/blob/master/model.py
+ # maskgit: https://github.com/google-research/maskgit/blob/main/maskgit/nets/discriminator.py
+ import math
+ import torch
+ import torch.nn as nn
+ try:
+     from kornia.filters import filter2d
+ except ImportError:  # kornia is optional; filter2d is only needed when Blur is actually called
+     pass
+
+ class Discriminator(nn.Module):
+     def __init__(self, input_nc=3, ndf=64, n_layers=3, channel_multiplier=1, image_size=256):
+         super().__init__()
+         channels = {
+             4: 512,
+             8: 512,
+             16: 512,
+             32: 512,
+             64: 256 * channel_multiplier,
+             128: 128 * channel_multiplier,
+             256: 64 * channel_multiplier,
+             512: 32 * channel_multiplier,
+             1024: 16 * channel_multiplier,
+         }
+
+         log_size = int(math.log(image_size, 2))
+         in_channel = channels[image_size]
+
+         blocks = [nn.Conv2d(input_nc, in_channel, 3, padding=1), leaky_relu()]
+         for i in range(log_size, 2, -1):
+             out_channel = channels[2 ** (i - 1)]
+             blocks.append(DiscriminatorBlock(in_channel, out_channel))
+             in_channel = out_channel
+         self.blocks = nn.ModuleList(blocks)
+
+         self.final_conv = nn.Sequential(
+             nn.Conv2d(in_channel, channels[4], 3, padding=1),
+             leaky_relu(),
+         )
+         self.final_linear = nn.Sequential(
+             nn.Linear(channels[4] * 4 * 4, channels[4]),
+             leaky_relu(),
+             nn.Linear(channels[4], 1)
+         )
+
+     def forward(self, x):
+         for block in self.blocks:
+             x = block(x)
+         x = self.final_conv(x)
+         x = x.view(x.shape[0], -1)
+         x = self.final_linear(x)
+         return x
+
+
+ class DiscriminatorBlock(nn.Module):
+     def __init__(self, input_channels, filters, downsample=True):
+         super().__init__()
+         self.conv_res = nn.Conv2d(input_channels, filters, 1, stride=(2 if downsample else 1))
+
+         self.net = nn.Sequential(
+             nn.Conv2d(input_channels, filters, 3, padding=1),
+             leaky_relu(),
+             nn.Conv2d(filters, filters, 3, padding=1),
+             leaky_relu()
+         )
+
+         self.downsample = nn.Sequential(
+             Blur(),
+             nn.Conv2d(filters, filters, 3, padding=1, stride=2)
+         ) if downsample else None
+
+     def forward(self, x):
+         res = self.conv_res(x)
+         x = self.net(x)
+         if exists(self.downsample):
+             x = self.downsample(x)
+         x = (x + res) * (1 / math.sqrt(2))
+         return x
+
+
+ class Blur(nn.Module):
+     def __init__(self):
+         super().__init__()
+         f = torch.Tensor([1, 2, 1])
+         self.register_buffer('f', f)
+
+     def forward(self, x):
+         f = self.f
+         f = f[None, None, :] * f[None, :, None]
+         return filter2d(x, f, normalized=True)
+
+
+ def leaky_relu(p=0.2):
+     return nn.LeakyReLU(p, inplace=True)
+
+
+ def exists(val):
+     return val is not None
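A short sketch of what the pieces above compute (illustrative, not part of the uploaded file; it assumes `kornia` is installed and the repo root is importable). `Blur` takes the outer product of `[1, 2, 1]` with itself to get a 3x3 binomial kernel, which `filter2d(..., normalized=True)` rescales to sum to 1, and the StyleGAN-style discriminator reduces a 256x256 image to a single logit per sample:

```python
# The 3x3 binomial blur kernel and a discriminator shape check (illustrative sketch).
import torch
from tokenizer.tokenizer_image.discriminator_stylegan import Discriminator

f = torch.tensor([1., 2., 1.])
kernel = f[None, :] * f[:, None]      # outer product -> [[1,2,1],[2,4,2],[1,2,1]]
print(kernel / kernel.sum())          # what normalized=True applies inside filter2d

disc = Discriminator(input_nc=3, image_size=256)
print(disc(torch.randn(2, 3, 256, 256)).shape)   # torch.Size([2, 1])
```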
tokenizer/tokenizer_image/lpips.py ADDED
@@ -0,0 +1,164 @@
1
+ """Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models"""
2
+
3
+ import os, hashlib
4
+ import requests
5
+ from tqdm import tqdm
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ from torchvision import models
10
+ from collections import namedtuple
11
+
12
+ URL_MAP = {
13
+ "vgg_lpips": "https://heibox.uni-heidelberg.de/f/607503859c864bc1b30b/?dl=1"
14
+ }
15
+
16
+ CKPT_MAP = {
17
+ "vgg_lpips": "vgg.pth"
18
+ }
19
+
20
+ MD5_MAP = {
21
+ "vgg_lpips": "d507d7349b931f0638a25a48a722f98a"
22
+ }
23
+
24
+ def download(url, local_path, chunk_size=1024):
25
+ os.makedirs(os.path.split(local_path)[0], exist_ok=True)
26
+ with requests.get(url, stream=True) as r:
27
+ total_size = int(r.headers.get("content-length", 0))
28
+ with tqdm(total=total_size, unit="B", unit_scale=True) as pbar:
29
+ with open(local_path, "wb") as f:
30
+ for data in r.iter_content(chunk_size=chunk_size):
31
+ if data:
32
+ f.write(data)
33
+ pbar.update(chunk_size)
34
+
35
+
36
+ def md5_hash(path):
37
+ with open(path, "rb") as f:
38
+ content = f.read()
39
+ return hashlib.md5(content).hexdigest()
40
+
41
+
42
+ def get_ckpt_path(name, root, check=False):
43
+ assert name in URL_MAP
44
+ path = os.path.join(root, CKPT_MAP[name])
45
+ if not os.path.exists(path) or (check and not md5_hash(path) == MD5_MAP[name]):
46
+ print("Downloading {} model from {} to {}".format(name, URL_MAP[name], path))
47
+ download(URL_MAP[name], path)
48
+ md5 = md5_hash(path)
49
+ assert md5 == MD5_MAP[name], md5
50
+ return path
51
+
52
+
53
+ class LPIPS(nn.Module):
54
+ # Learned perceptual metric
55
+ def __init__(self, use_dropout=True):
56
+ super().__init__()
57
+ self.scaling_layer = ScalingLayer()
58
+ self.chns = [64, 128, 256, 512, 512]  # vgg16 feature channels
59
+ self.net = vgg16(pretrained=True, requires_grad=False)
60
+ self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
61
+ self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
62
+ self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
63
+ self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
64
+ self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
65
+ self.load_from_pretrained()
66
+ for param in self.parameters():
67
+ param.requires_grad = False
68
+
69
+ def load_from_pretrained(self, name="vgg_lpips"):
70
+ ckpt = get_ckpt_path(name, os.path.join(os.path.dirname(os.path.abspath(__file__)), "cache"))
71
+ self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
72
+ print("loaded pretrained LPIPS loss from {}".format(ckpt))
73
+
74
+ @classmethod
75
+ def from_pretrained(cls, name="vgg_lpips"):
76
+ if name != "vgg_lpips":
77
+ raise NotImplementedError
78
+ model = cls()
79
+ ckpt = get_ckpt_path(name, os.path.join(os.path.dirname(os.path.abspath(__file__)), "cache"))
80
+ model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
81
+ return model
82
+
83
+ def forward(self, input, target):
84
+ in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target))
85
+ outs0, outs1 = self.net(in0_input), self.net(in1_input)
86
+ feats0, feats1, diffs = {}, {}, {}
87
+ lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
88
+ for kk in range(len(self.chns)):
89
+ feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk])
90
+ diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
91
+
92
+ res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))]
93
+ val = res[0]
94
+ for l in range(1, len(self.chns)):
95
+ val += res[l]
96
+ return val
97
+
98
+
99
+ class ScalingLayer(nn.Module):
100
+ def __init__(self):
101
+ super(ScalingLayer, self).__init__()
102
+ self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
103
+ self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None])
104
+
105
+ def forward(self, inp):
106
+ return (inp - self.shift) / self.scale
107
+
108
+
109
+ class NetLinLayer(nn.Module):
110
+ """ A single linear layer which does a 1x1 conv """
111
+ def __init__(self, chn_in, chn_out=1, use_dropout=False):
112
+ super(NetLinLayer, self).__init__()
113
+ layers = [nn.Dropout(), ] if (use_dropout) else []
114
+ layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ]
115
+ self.model = nn.Sequential(*layers)
116
+
117
+
118
+ class vgg16(torch.nn.Module):
119
+ def __init__(self, requires_grad=False, pretrained=True):
120
+ super(vgg16, self).__init__()
121
+ vgg_pretrained_features = models.vgg16(pretrained=pretrained).features
122
+ self.slice1 = torch.nn.Sequential()
123
+ self.slice2 = torch.nn.Sequential()
124
+ self.slice3 = torch.nn.Sequential()
125
+ self.slice4 = torch.nn.Sequential()
126
+ self.slice5 = torch.nn.Sequential()
127
+ self.N_slices = 5
128
+ for x in range(4):
129
+ self.slice1.add_module(str(x), vgg_pretrained_features[x])
130
+ for x in range(4, 9):
131
+ self.slice2.add_module(str(x), vgg_pretrained_features[x])
132
+ for x in range(9, 16):
133
+ self.slice3.add_module(str(x), vgg_pretrained_features[x])
134
+ for x in range(16, 23):
135
+ self.slice4.add_module(str(x), vgg_pretrained_features[x])
136
+ for x in range(23, 30):
137
+ self.slice5.add_module(str(x), vgg_pretrained_features[x])
138
+ if not requires_grad:
139
+ for param in self.parameters():
140
+ param.requires_grad = False
141
+
142
+ def forward(self, X):
143
+ h = self.slice1(X)
144
+ h_relu1_2 = h
145
+ h = self.slice2(h)
146
+ h_relu2_2 = h
147
+ h = self.slice3(h)
148
+ h_relu3_3 = h
149
+ h = self.slice4(h)
150
+ h_relu4_3 = h
151
+ h = self.slice5(h)
152
+ h_relu5_3 = h
153
+ vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
154
+ out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
155
+ return out
156
+
157
+
158
+ def normalize_tensor(x,eps=1e-10):
159
+ norm_factor = torch.sqrt(torch.sum(x**2,dim=1,keepdim=True))
160
+ return x/(norm_factor+eps)
161
+
162
+
163
+ def spatial_average(x, keepdim=True):
164
+ return x.mean([2,3],keepdim=keepdim)
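A hedged usage sketch for the LPIPS metric above (not part of the uploaded file): inputs are expected in [-1, 1], the result is one perceptual distance per image, and the first call downloads the pretrained VGG16 backbone plus the `vgg.pth` linear heads into a `cache/` directory next to this module, so network access is assumed.

```python
# Illustrative LPIPS usage; weights are downloaded on first use.
import torch
from tokenizer.tokenizer_image.lpips import LPIPS

lpips = LPIPS().eval()
img_a = torch.rand(4, 3, 256, 256) * 2 - 1   # stand-in batches in [-1, 1]
img_b = torch.rand(4, 3, 256, 256) * 2 - 1
with torch.no_grad():
    dist = lpips(img_a, img_b)               # shape (4, 1, 1, 1); larger = more different
print(dist.flatten())
```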
tokenizer/tokenizer_image/reconstruction_vq_ddp.py ADDED
@@ -0,0 +1,207 @@
1
+ import torch
2
+ torch.backends.cuda.matmul.allow_tf32 = True
3
+ torch.backends.cudnn.allow_tf32 = True
4
+ import torch.nn.functional as F
5
+ import torch.distributed as dist
6
+ from torch.utils.data import DataLoader
7
+ from torch.utils.data.distributed import DistributedSampler
8
+ from torchvision import transforms
9
+ from tqdm import tqdm
10
+ import os
11
+ from PIL import Image
12
+ import numpy as np
13
+ import argparse
14
+ import itertools
15
+
16
+ from skimage.metrics import peak_signal_noise_ratio as psnr_loss
17
+ from skimage.metrics import structural_similarity as ssim_loss
18
+ from dataset.augmentation import center_crop_arr
19
+ from dataset.build import build_dataset
20
+ from tokenizer.tokenizer_image.vq_model import VQ_models
21
+
22
+
23
+
24
+ def create_npz_from_sample_folder(sample_dir, num=50000):
25
+ """
26
+ Builds a single .npz file from a folder of .png samples.
27
+ """
28
+ samples = []
29
+ for i in tqdm(range(num), desc="Building .npz file from samples"):
30
+ sample_pil = Image.open(f"{sample_dir}/{i:06d}.png")
31
+ sample_np = np.asarray(sample_pil).astype(np.uint8)
32
+ samples.append(sample_np)
33
+ samples = np.stack(samples)
34
+ assert samples.shape == (num, samples.shape[1], samples.shape[2], 3)
35
+ npz_path = f"{sample_dir}.npz"
36
+ np.savez(npz_path, arr_0=samples)
37
+ print(f"Saved .npz file to {npz_path} [shape={samples.shape}].")
38
+ return npz_path
39
+
40
+
41
+
42
+ def main(args):
43
+ # Setup PyTorch:
44
+ assert torch.cuda.is_available(), "Sampling with DDP requires at least one GPU. sample.py supports CPU-only usage"
45
+ torch.set_grad_enabled(False)
46
+
47
+ # Setup DDP:
48
+ dist.init_process_group("nccl")
49
+ rank = dist.get_rank()
50
+ device = rank % torch.cuda.device_count()
51
+ seed = args.global_seed * dist.get_world_size() + rank
52
+ torch.manual_seed(seed)
53
+ torch.cuda.set_device(device)
54
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
55
+
56
+ # create and load model
57
+ vq_model = VQ_models[args.vq_model](
58
+ codebook_size=args.codebook_size,
59
+ codebook_embed_dim=args.codebook_embed_dim)
60
+ vq_model.to(device)
61
+ vq_model.eval()
62
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
63
+ if "ema" in checkpoint: # ema
64
+ model_weight = checkpoint["ema"]
65
+ elif "model" in checkpoint: # ddp
66
+ model_weight = checkpoint["model"]
67
+ elif "state_dict" in checkpoint:
68
+ model_weight = checkpoint["state_dict"]
69
+ else:
70
+ raise Exception("please check model weight")
71
+ vq_model.load_state_dict(model_weight)
72
+ del checkpoint
73
+
74
+ # Create folder to save samples:
75
+ folder_name = (f"{args.vq_model}-{args.dataset}-size-{args.image_size}-size-{args.image_size_eval}"
76
+ f"-codebook-size-{args.codebook_size}-dim-{args.codebook_embed_dim}-seed-{args.global_seed}")
77
+ sample_folder_dir = f"{args.sample_dir}/{folder_name}"
78
+ if rank == 0:
79
+ os.makedirs(sample_folder_dir, exist_ok=True)
80
+ print(f"Saving .png samples at {sample_folder_dir}")
81
+ dist.barrier()
82
+
83
+ # Setup data:
84
+ transform = transforms.Compose([
85
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, args.image_size)),
86
+ transforms.ToTensor(),
87
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
88
+ ])
89
+
90
+ if args.dataset == 'imagenet':
91
+ dataset = build_dataset(args, transform=transform)
92
+ num_fid_samples = 50000
93
+ elif args.dataset == 'coco':
94
+ dataset = build_dataset(args, transform=transform)
95
+ num_fid_samples = 5000
96
+ elif args.dataset == 'imagenet_code':
97
+ dataset = build_dataset(args)
98
+ num_fid_samples = 50000
99
+ else:
100
+ raise Exception("please check dataset")
101
+
102
+ sampler = DistributedSampler(
103
+ dataset,
104
+ num_replicas=dist.get_world_size(),
105
+ rank=rank,
106
+ shuffle=False,
107
+ seed=args.global_seed
108
+ )
109
+ loader = DataLoader(
110
+ dataset,
111
+ batch_size=args.per_proc_batch_size,
112
+ shuffle=False,
113
+ sampler=sampler,
114
+ num_workers=args.num_workers,
115
+ pin_memory=True,
116
+ drop_last=False
117
+ )
118
+
119
+ # Figure out how many samples we need to generate on each GPU and how many iterations we need to run:
120
+ n = args.per_proc_batch_size
121
+ global_batch_size = n * dist.get_world_size()
122
+
123
+ psnr_val_rgb = []
124
+ ssim_val_rgb = []
125
+ loader = tqdm(loader) if rank == 0 else loader
126
+ total = 0
127
+ # for x, _ in loader:
128
+ for batch in loader:
129
+ x = batch['condition_imgs'].repeat(1,3,1,1)
132
+ if args.image_size_eval != args.image_size:
133
+ rgb_gts = F.interpolate(x, size=(args.image_size_eval, args.image_size_eval), mode='bicubic')
134
+ else:
135
+ rgb_gts = x
136
+ rgb_gts = (rgb_gts.permute(0, 2, 3, 1).to("cpu").numpy() + 1.0) / 2.0 # rgb_gt value is between [0, 1]
137
+ x = x.to(device, non_blocking=True)
138
+ with torch.no_grad():
139
+ latent, _, [_, _, indices] = vq_model.encode(x.float())
141
+ samples = vq_model.decode_code(indices, latent.shape) # output value is between [-1, 1]
142
+ if args.image_size_eval != args.image_size:
143
+ samples = F.interpolate(samples, size=(args.image_size_eval, args.image_size_eval), mode='bicubic')
144
+ samples = torch.clamp(127.5 * samples + 128.0, 0, 255).permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy()
145
+
146
+ # Save samples to disk as individual .png files
147
+ for i, (sample, rgb_gt) in enumerate(zip(samples, rgb_gts)):
148
+ index = i * dist.get_world_size() + rank + total
149
+ # Image.fromarray(sample).save(f"{sample_folder_dir}/{index:06d}.png")
150
+ # metric
151
+ rgb_restored = sample.astype(np.float32) / 255. # rgb_restored value is between [0, 1]
152
+ psnr = psnr_loss(rgb_restored, rgb_gt)
153
+ ssim = ssim_loss(rgb_restored, rgb_gt, multichannel=True, data_range=2.0, channel_axis=-1)
154
+ psnr_val_rgb.append(psnr)
155
+ ssim_val_rgb.append(ssim)
156
+
157
+ total += global_batch_size
158
+
159
+ # ------------------------------------
160
+ # Summary
161
+ # ------------------------------------
162
+ # Make sure all processes have finished saving their samples
163
+ dist.barrier()
164
+ world_size = dist.get_world_size()
165
+ gather_psnr_val = [None for _ in range(world_size)]
166
+ gather_ssim_val = [None for _ in range(world_size)]
167
+ dist.all_gather_object(gather_psnr_val, psnr_val_rgb)
168
+ dist.all_gather_object(gather_ssim_val, ssim_val_rgb)
169
+
170
+ if rank == 0:
171
+ gather_psnr_val = list(itertools.chain(*gather_psnr_val))
172
+ gather_ssim_val = list(itertools.chain(*gather_ssim_val))
173
+ psnr_val_rgb = sum(gather_psnr_val) / len(gather_psnr_val)
174
+ ssim_val_rgb = sum(gather_ssim_val) / len(gather_ssim_val)
175
+ print("PSNR: %f, SSIM: %f " % (psnr_val_rgb, ssim_val_rgb))
176
+
177
+ result_file = f"{sample_folder_dir}_results.txt"
178
+ print("writing results to {}".format(result_file))
179
+ with open(result_file, 'w') as f:
180
+ print("PSNR: %f, SSIM: %f " % (psnr_val_rgb, ssim_val_rgb), file=f)
181
+
182
+ create_npz_from_sample_folder(sample_folder_dir, num_fid_samples)
183
+ print("Done.")
184
+
185
+ dist.barrier()
186
+ dist.destroy_process_group()
187
+
188
+
189
+ if __name__ == "__main__":
190
+ parser = argparse.ArgumentParser()
191
+ parser.add_argument("--data-path", type=str, default=None)
192
+ parser.add_argument("--code-path", type=str, required=True)
193
+ parser.add_argument("--dataset", type=str, choices=['imagenet', 'coco', 'imagenet_code'], default='imagenet')
194
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
195
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
196
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
197
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
198
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 512], default=256)
199
+ parser.add_argument("--image-size-eval", type=int, choices=[256, 384, 512], default=256)
200
+ parser.add_argument("--sample-dir", type=str, default="reconstructions")
201
+ parser.add_argument("--per-proc-batch-size", type=int, default=32)
202
+ parser.add_argument("--global-seed", type=int, default=0)
203
+ parser.add_argument("--num-workers", type=int, default=4)
204
+ parser.add_argument("--condition", type=str, choices=['canny', 'hed'], default='canny')
205
+ parser.add_argument("--get-condition-img", type=bool, default=False)
206
+ args = parser.parse_args()
207
+ main(args)
tokenizer/tokenizer_image/vq_demo.py ADDED
@@ -0,0 +1,84 @@
1
+ import torch
2
+ import torch.nn.functional as F
3
+
4
+ import os
5
+ import argparse
6
+ import numpy as np
7
+ from PIL import Image
8
+
9
+ from tokenizer.tokenizer_image.vq_model import VQ_models
10
+ from dataset.augmentation import center_crop_arr
11
+
12
+
13
+ def main(args):
14
+ # Setup PyTorch:
15
+ torch.manual_seed(args.seed)
16
+ torch.set_grad_enabled(False)
17
+ device = "cuda" if torch.cuda.is_available() else "cpu"
18
+
19
+ # create and load model
20
+ model = VQ_models[args.vq_model](
21
+ codebook_size=args.codebook_size,
22
+ codebook_embed_dim=args.codebook_embed_dim)
23
+ model.to(device)
24
+ model.eval()
25
+ checkpoint = torch.load(args.vq_ckpt, map_location="cpu")
26
+ if "ema" in checkpoint: # ema
27
+ model_weight = checkpoint["ema"]
28
+ elif "model" in checkpoint: # ddp
29
+ model_weight = checkpoint["model"]
30
+ elif "state_dict" in checkpoint:
31
+ model_weight = checkpoint["state_dict"]
32
+ else:
33
+ raise Exception("please check model weight")
34
+ model.load_state_dict(model_weight)
35
+ del checkpoint
36
+
37
+ # output dir
38
+ os.makedirs(args.output_dir, exist_ok=True)
39
+ out_path = args.image_path.replace('.jpg', '_{}.jpg'.format(args.suffix))
40
+ out_path = out_path.replace('.jpeg', '_{}.jpeg'.format(args.suffix))
41
+ out_path = out_path.replace('.png', '_{}.png'.format(args.suffix))
42
+ out_filename = out_path.split('/')[-1]
43
+ out_path = os.path.join(args.output_dir, out_filename)
44
+
45
+ # load image
46
+ pil_image = Image.open(args.image_path).convert("RGB")
47
+ img = center_crop_arr(pil_image, args.image_size)
48
+ # # preprocess
49
+ # size_org = img.size
50
+ # img = img.resize((input_size, input_size))
51
+ img = np.array(img) / 255.
52
+ x = 2.0 * img - 1.0 # x value is between [-1, 1]
53
+ x = torch.tensor(x)
54
+ x = x.unsqueeze(dim=0)
55
+ x = torch.einsum('nhwc->nchw', x)
56
+ x_input = x.float().to("cuda")
57
+
58
+ # inference
59
+ with torch.no_grad():
60
+ latent, _, [_, _, indices] = model.encode(x_input)
61
+ output = model.decode_code(indices, latent.shape) # output value is between [-1, 1]
62
+
63
+ # postprocess
64
+ output = F.interpolate(output, size=[args.image_size, args.image_size], mode='bicubic').permute(0, 2, 3, 1)[0]
65
+ sample = torch.clamp(127.5 * output + 128.0, 0, 255).to("cpu", dtype=torch.uint8).numpy()
66
+
67
+ # save
68
+ Image.fromarray(sample).save(out_path)
69
+ print("Reconstructed image is saved to {}".format(out_path))
70
+
71
+
72
+ if __name__ == "__main__":
73
+ parser = argparse.ArgumentParser()
74
+ parser.add_argument("--image-path", type=str, default="assets/example.jpg")
75
+ parser.add_argument("--output-dir", type=str, default="output_vq_demo")
76
+ parser.add_argument("--suffix", type=str, default="tokenizer_image")
77
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models.keys()), default="VQ-16")
78
+ parser.add_argument("--vq-ckpt", type=str, default=None, help="ckpt path for vq model")
79
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
80
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
81
+ parser.add_argument("--image-size", type=int, choices=[256, 384, 448, 512, 1024], default=512)
82
+ parser.add_argument("--seed", type=int, default=0)
83
+ args = parser.parse_args()
84
+ main(args)
tokenizer/tokenizer_image/vq_loss.py ADDED
@@ -0,0 +1,168 @@
+ # Modified from:
+ # taming-transformers: https://github.com/CompVis/taming-transformers
+ # muse-maskgit-pytorch: https://github.com/lucidrains/muse-maskgit-pytorch/blob/main/muse_maskgit_pytorch/vqgan_vae.py
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from tokenizer.tokenizer_image.lpips import LPIPS
+ from tokenizer.tokenizer_image.discriminator_patchgan import NLayerDiscriminator as PatchGANDiscriminator
+ from tokenizer.tokenizer_image.discriminator_stylegan import Discriminator as StyleGANDiscriminator
+
+
+ def hinge_d_loss(logits_real, logits_fake):
+     loss_real = torch.mean(F.relu(1. - logits_real))
+     loss_fake = torch.mean(F.relu(1. + logits_fake))
+     d_loss = 0.5 * (loss_real + loss_fake)
+     return d_loss
+
+
+ def vanilla_d_loss(logits_real, logits_fake):
+     loss_real = torch.mean(F.softplus(-logits_real))
+     loss_fake = torch.mean(F.softplus(logits_fake))
+     d_loss = 0.5 * (loss_real + loss_fake)
+     return d_loss
+
+
+ def non_saturating_d_loss(logits_real, logits_fake):
+     # binary_cross_entropy_with_logits expects (input=logits, target=labels)
+     loss_real = torch.mean(F.binary_cross_entropy_with_logits(logits_real, torch.ones_like(logits_real)))
+     loss_fake = torch.mean(F.binary_cross_entropy_with_logits(logits_fake, torch.zeros_like(logits_fake)))
+     d_loss = 0.5 * (loss_real + loss_fake)
+     return d_loss
+
+
+ def hinge_gen_loss(logit_fake):
+     return -torch.mean(logit_fake)
+
+
+ def non_saturating_gen_loss(logit_fake):
+     return torch.mean(F.binary_cross_entropy_with_logits(logit_fake, torch.ones_like(logit_fake)))
+
+
+ def adopt_weight(weight, global_step, threshold=0, value=0.):
+     if global_step < threshold:
+         weight = value
+     return weight
+
+
+ class VQLoss(nn.Module):
+     def __init__(self, disc_start, disc_loss="hinge", disc_dim=64, disc_type='patchgan', image_size=256,
+                  disc_num_layers=3, disc_in_channels=3, disc_weight=1.0, disc_adaptive_weight=False,
+                  gen_adv_loss='hinge', reconstruction_loss='l2', reconstruction_weight=1.0,
+                  codebook_weight=1.0, perceptual_weight=1.0,
+                  ):
+         super().__init__()
+         # discriminator loss
+         assert disc_type in ["patchgan", "stylegan"]
+         assert disc_loss in ["hinge", "vanilla", "non-saturating"]
+         if disc_type == "patchgan":
+             self.discriminator = PatchGANDiscriminator(
+                 input_nc=disc_in_channels,
+                 n_layers=disc_num_layers,
+                 ndf=disc_dim,
+             )
+         elif disc_type == "stylegan":
+             self.discriminator = StyleGANDiscriminator(
+                 input_nc=disc_in_channels,
+                 image_size=image_size,
+             )
+         else:
+             raise ValueError(f"Unknown GAN discriminator type '{disc_type}'.")
+         if disc_loss == "hinge":
+             self.disc_loss = hinge_d_loss
+         elif disc_loss == "vanilla":
+             self.disc_loss = vanilla_d_loss
+         elif disc_loss == "non-saturating":
+             self.disc_loss = non_saturating_d_loss
+         else:
+             raise ValueError(f"Unknown GAN discriminator loss '{disc_loss}'.")
+         self.discriminator_iter_start = disc_start
+         self.disc_weight = disc_weight
+         self.disc_adaptive_weight = disc_adaptive_weight
+
+         assert gen_adv_loss in ["hinge", "non-saturating"]
+         # gen_adv_loss
+         if gen_adv_loss == "hinge":
+             self.gen_adv_loss = hinge_gen_loss
+         elif gen_adv_loss == "non-saturating":
+             self.gen_adv_loss = non_saturating_gen_loss
+         else:
+             raise ValueError(f"Unknown GAN generator loss '{gen_adv_loss}'.")
+
+         # perceptual loss
+         self.perceptual_loss = LPIPS().eval()
+         self.perceptual_weight = perceptual_weight
+
+         # reconstruction loss
+         if reconstruction_loss == "l1":
+             self.rec_loss = F.l1_loss
+         elif reconstruction_loss == "l2":
+             self.rec_loss = F.mse_loss
+         else:
+             raise ValueError(f"Unknown rec loss '{reconstruction_loss}'.")
+         self.rec_weight = reconstruction_weight
+
+         # codebook loss
+         self.codebook_weight = codebook_weight
+
+     def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer):
+         nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
+         g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
+
+         d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
+         d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
+         return d_weight.detach()
+
+     def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx, global_step, last_layer=None,
+                 logger=None, log_every=100):
+         # generator update
+         if optimizer_idx == 0:
+             # reconstruction loss
+             rec_loss = self.rec_loss(inputs.contiguous(), reconstructions.contiguous())
+
+             # perceptual loss
+             p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
+             p_loss = torch.mean(p_loss)
+
+             # discriminator loss
+             logits_fake = self.discriminator(reconstructions.contiguous())
+             generator_adv_loss = self.gen_adv_loss(logits_fake)
+
+             if self.disc_adaptive_weight:
+                 null_loss = self.rec_weight * rec_loss + self.perceptual_weight * p_loss
+                 disc_adaptive_weight = self.calculate_adaptive_weight(null_loss, generator_adv_loss, last_layer=last_layer)
+             else:
+                 disc_adaptive_weight = 1
+             disc_weight = adopt_weight(self.disc_weight, global_step, threshold=self.discriminator_iter_start)
+
+             loss = self.rec_weight * rec_loss + \
+                 self.perceptual_weight * p_loss + \
+                 disc_adaptive_weight * disc_weight * generator_adv_loss + \
+                 codebook_loss[0] + codebook_loss[1] + codebook_loss[2]
+
+             if global_step % log_every == 0:
+                 rec_loss = self.rec_weight * rec_loss
+                 p_loss = self.perceptual_weight * p_loss
+                 generator_adv_loss = disc_adaptive_weight * disc_weight * generator_adv_loss
+                 logger.info(f"(Generator) rec_loss: {rec_loss:.4f}, perceptual_loss: {p_loss:.4f}, "
+                             f"vq_loss: {codebook_loss[0]:.4f}, commit_loss: {codebook_loss[1]:.4f}, entropy_loss: {codebook_loss[2]:.4f}, "
+                             f"codebook_usage: {codebook_loss[3]:.4f}, generator_adv_loss: {generator_adv_loss:.4f}, "
+                             f"disc_adaptive_weight: {disc_adaptive_weight:.4f}, disc_weight: {disc_weight:.4f}")
+             return loss
+
+         # discriminator update
+         if optimizer_idx == 1:
+             logits_real = self.discriminator(inputs.contiguous().detach())
+             logits_fake = self.discriminator(reconstructions.contiguous().detach())
+
+             disc_weight = adopt_weight(self.disc_weight, global_step, threshold=self.discriminator_iter_start)
+             d_adversarial_loss = disc_weight * self.disc_loss(logits_real, logits_fake)
+
+             if global_step % log_every == 0:
+                 logits_real = logits_real.detach().mean()
+                 logits_fake = logits_fake.detach().mean()
+                 logger.info(f"(Discriminator) "
+                             f"discriminator_adv_loss: {d_adversarial_loss:.4f}, disc_weight: {disc_weight:.4f}, "
+                             f"logits_real: {logits_real:.4f}, logits_fake: {logits_fake:.4f}")
+             return d_adversarial_loss
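VQLoss drives a two-optimizer GAN setup: `optimizer_idx=0` returns the tokenizer-side objective (reconstruction + LPIPS + adversarial + codebook terms), `optimizer_idx=1` returns the discriminator loss, and `adopt_weight` keeps the adversarial terms at zero until `disc_start` steps have passed. A minimal sketch of one alternating step follows; the optimizers, logger, random batch, and hyperparameters are placeholders rather than the repo's training script, and the perceptual loss downloads its VGG weights on first use.

```python
# Illustrative alternating generator/discriminator step with VQLoss (a sketch).
import logging
import torch
from tokenizer.tokenizer_image.vq_model import VQ_models
from tokenizer.tokenizer_image.vq_loss import VQLoss

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("vq")

vq_model = VQ_models["VQ-16"]()
vq_loss = VQLoss(disc_start=0, disc_weight=0.5)   # disc_start=0 enables the GAN terms immediately
opt_g = torch.optim.Adam(vq_model.parameters(), lr=1e-4)
opt_d = torch.optim.Adam(vq_loss.discriminator.parameters(), lr=1e-4)

x = torch.randn(2, 3, 256, 256)                   # stand-in batch in [-1, 1]
recons, codebook_loss = vq_model(x)               # codebook_loss = (vq, commit, entropy, usage)

# tokenizer / generator step (optimizer_idx=0)
g_loss = vq_loss(codebook_loss, x, recons, optimizer_idx=0, global_step=0,
                 last_layer=vq_model.decoder.last_layer, logger=logger)
opt_g.zero_grad()
g_loss.backward()
opt_g.step()

# discriminator step (optimizer_idx=1) on detached reconstructions
d_loss = vq_loss(codebook_loss, x, recons, optimizer_idx=1, global_step=0, logger=logger)
opt_d.zero_grad()
d_loss.backward()
opt_d.step()
```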
tokenizer/tokenizer_image/vq_model.py ADDED
@@ -0,0 +1,425 @@
1
+ # Modified from:
2
+ # taming-transformers: https://github.com/CompVis/taming-transformers
3
+ # maskgit: https://github.com/google-research/maskgit
4
+ from dataclasses import dataclass, field
5
+ from typing import List
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.nn.functional as F
10
+
11
+
12
+ @dataclass
13
+ class ModelArgs:
14
+ codebook_size: int = 16384
15
+ codebook_embed_dim: int = 8
16
+ codebook_l2_norm: bool = True
17
+ codebook_show_usage: bool = True
18
+ commit_loss_beta: float = 0.25
19
+ entropy_loss_ratio: float = 0.0
20
+
21
+ encoder_ch_mult: List[int] = field(default_factory=lambda: [1, 1, 2, 2, 4])
22
+ decoder_ch_mult: List[int] = field(default_factory=lambda: [1, 1, 2, 2, 4])
23
+ z_channels: int = 256
24
+ dropout_p: float = 0.0
25
+
26
+
27
+
28
+ class VQModel(nn.Module):
29
+ def __init__(self, config: ModelArgs):
30
+ super().__init__()
31
+ self.config = config
32
+ self.encoder = Encoder(ch_mult=config.encoder_ch_mult, z_channels=config.z_channels, dropout=config.dropout_p)
33
+ self.decoder = Decoder(ch_mult=config.decoder_ch_mult, z_channels=config.z_channels, dropout=config.dropout_p)
34
+
35
+ self.quantize = VectorQuantizer(config.codebook_size, config.codebook_embed_dim,
36
+ config.commit_loss_beta, config.entropy_loss_ratio,
37
+ config.codebook_l2_norm, config.codebook_show_usage)
38
+ self.quant_conv = nn.Conv2d(config.z_channels, config.codebook_embed_dim, 1)
39
+ self.post_quant_conv = nn.Conv2d(config.codebook_embed_dim, config.z_channels, 1)
40
+
41
+ def encode(self, x):
42
+ #import pdb; pdb.set_trace()
43
+ h = self.encoder(x)
44
+ h = self.quant_conv(h)
45
+ quant, emb_loss, info = self.quantize(h)
46
+ return quant, emb_loss, info
47
+
48
+ def decode(self, quant):
49
+ quant = self.post_quant_conv(quant)
50
+ dec = self.decoder(quant)
51
+ return dec
52
+
53
+ def decode_code(self, code_b, shape=None, channel_first=True):
54
+ quant_b = self.quantize.get_codebook_entry(code_b, shape, channel_first)
55
+ dec = self.decode(quant_b)
56
+ return dec
57
+
58
+ def forward(self, input):
59
+ quant, diff, _ = self.encode(input)
60
+ dec = self.decode(quant)
61
+ return dec, diff
62
+
63
+
64
+
65
+ class Encoder(nn.Module):
66
+ def __init__(self, in_channels=3, ch=128, ch_mult=(1,1,2,2,4), num_res_blocks=2,
67
+ norm_type='group', dropout=0.0, resamp_with_conv=True, z_channels=256):
68
+ super().__init__()
69
+ self.num_resolutions = len(ch_mult)
70
+ self.num_res_blocks = num_res_blocks
71
+ self.conv_in = nn.Conv2d(in_channels, ch, kernel_size=3, stride=1, padding=1)
72
+
73
+ # downsampling
74
+ in_ch_mult = (1,) + tuple(ch_mult)
75
+ self.conv_blocks = nn.ModuleList()
76
+ for i_level in range(self.num_resolutions):
77
+ conv_block = nn.Module()
78
+ # res & attn
79
+ res_block = nn.ModuleList()
80
+ attn_block = nn.ModuleList()
81
+ block_in = ch*in_ch_mult[i_level]
82
+ block_out = ch*ch_mult[i_level]
83
+ for _ in range(self.num_res_blocks):
84
+ res_block.append(ResnetBlock(block_in, block_out, dropout=dropout, norm_type=norm_type))
85
+ block_in = block_out
86
+ if i_level == self.num_resolutions - 1:
87
+ attn_block.append(AttnBlock(block_in, norm_type))
88
+ conv_block.res = res_block
89
+ conv_block.attn = attn_block
90
+ # downsample
91
+ if i_level != self.num_resolutions-1:
92
+ conv_block.downsample = Downsample(block_in, resamp_with_conv)
93
+ self.conv_blocks.append(conv_block)
94
+
95
+ # middle
96
+ self.mid = nn.ModuleList()
97
+ self.mid.append(ResnetBlock(block_in, block_in, dropout=dropout, norm_type=norm_type))
98
+ self.mid.append(AttnBlock(block_in, norm_type=norm_type))
99
+ self.mid.append(ResnetBlock(block_in, block_in, dropout=dropout, norm_type=norm_type))
100
+
101
+ # end
102
+ self.norm_out = Normalize(block_in, norm_type)
103
+ self.conv_out = nn.Conv2d(block_in, z_channels, kernel_size=3, stride=1, padding=1)
104
+
105
+
106
+ def forward(self, x):
107
+ h = self.conv_in(x)
108
+ # downsampling
109
+ for i_level, block in enumerate(self.conv_blocks):
110
+ for i_block in range(self.num_res_blocks):
111
+ h = block.res[i_block](h)
112
+ if len(block.attn) > 0:
113
+ h = block.attn[i_block](h)
114
+ if i_level != self.num_resolutions - 1:
115
+ h = block.downsample(h)
116
+
117
+ # middle
118
+ for mid_block in self.mid:
119
+ h = mid_block(h)
120
+
121
+ # end
122
+ h = self.norm_out(h)
123
+ h = nonlinearity(h)
124
+ h = self.conv_out(h)
125
+ return h
126
+
127
+
128
+
129
+ class Decoder(nn.Module):
130
+ def __init__(self, z_channels=256, ch=128, ch_mult=(1,1,2,2,4), num_res_blocks=2, norm_type="group",
131
+ dropout=0.0, resamp_with_conv=True, out_channels=3):
132
+ super().__init__()
133
+ self.num_resolutions = len(ch_mult)
134
+ self.num_res_blocks = num_res_blocks
135
+
136
+ block_in = ch*ch_mult[self.num_resolutions-1]
137
+ # z to block_in
138
+ self.conv_in = nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1)
139
+
140
+ # middle
141
+ self.mid = nn.ModuleList()
142
+ self.mid.append(ResnetBlock(block_in, block_in, dropout=dropout, norm_type=norm_type))
143
+ self.mid.append(AttnBlock(block_in, norm_type=norm_type))
144
+ self.mid.append(ResnetBlock(block_in, block_in, dropout=dropout, norm_type=norm_type))
145
+
146
+ # upsampling
147
+ self.conv_blocks = nn.ModuleList()
148
+ for i_level in reversed(range(self.num_resolutions)):
149
+ conv_block = nn.Module()
150
+ # res & attn
151
+ res_block = nn.ModuleList()
152
+ attn_block = nn.ModuleList()
153
+ block_out = ch*ch_mult[i_level]
154
+ for _ in range(self.num_res_blocks + 1):
155
+ res_block.append(ResnetBlock(block_in, block_out, dropout=dropout, norm_type=norm_type))
156
+ block_in = block_out
157
+ if i_level == self.num_resolutions - 1:
158
+ attn_block.append(AttnBlock(block_in, norm_type))
159
+ conv_block.res = res_block
160
+ conv_block.attn = attn_block
161
+ # upsample
162
+ if i_level != 0:
163
+ conv_block.upsample = Upsample(block_in, resamp_with_conv)
164
+ self.conv_blocks.append(conv_block)
165
+
166
+ # end
167
+ self.norm_out = Normalize(block_in, norm_type)
168
+ self.conv_out = nn.Conv2d(block_in, out_channels, kernel_size=3, stride=1, padding=1)
169
+
170
+ @property
171
+ def last_layer(self):
172
+ return self.conv_out.weight
173
+
174
+ def forward(self, z):
175
+ # z to block_in
176
+ h = self.conv_in(z)
177
+
178
+ # middle
179
+ for mid_block in self.mid:
180
+ h = mid_block(h)
181
+
182
+ # upsampling
183
+ for i_level, block in enumerate(self.conv_blocks):
184
+ for i_block in range(self.num_res_blocks + 1):
185
+ h = block.res[i_block](h)
186
+ if len(block.attn) > 0:
187
+ h = block.attn[i_block](h)
188
+ if i_level != self.num_resolutions - 1:
189
+ h = block.upsample(h)
190
+
191
+ # end
192
+ h = self.norm_out(h)
193
+ h = nonlinearity(h)
194
+ h = self.conv_out(h)
195
+ return h
196
+
197
+
198
+ class VectorQuantizer(nn.Module):
199
+ def __init__(self, n_e, e_dim, beta, entropy_loss_ratio, l2_norm, show_usage):
200
+ super().__init__()
201
+ self.n_e = n_e
202
+ self.e_dim = e_dim
203
+ self.beta = beta
204
+ self.entropy_loss_ratio = entropy_loss_ratio
205
+ self.l2_norm = l2_norm
206
+ self.show_usage = show_usage
207
+
208
+ self.embedding = nn.Embedding(self.n_e, self.e_dim)
209
+ self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)
210
+ if self.l2_norm:
211
+ self.embedding.weight.data = F.normalize(self.embedding.weight.data, p=2, dim=-1)
212
+ if self.show_usage:
213
+ self.register_buffer("codebook_used", nn.Parameter(torch.zeros(65536)))
214
+
215
+
216
+ def forward(self, z):
217
+ # reshape z -> (batch, height, width, channel) and flatten
218
+ z = torch.einsum('b c h w -> b h w c', z).contiguous()
219
+ z_flattened = z.view(-1, self.e_dim)
220
+ # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
221
+
222
+ if self.l2_norm:
223
+ z = F.normalize(z, p=2, dim=-1)
224
+ z_flattened = F.normalize(z_flattened, p=2, dim=-1)
225
+ embedding = F.normalize(self.embedding.weight, p=2, dim=-1)
226
+ else:
227
+ embedding = self.embedding.weight
228
+
229
+ d = torch.sum(z_flattened ** 2, dim=1, keepdim=True) + \
230
+ torch.sum(embedding**2, dim=1) - 2 * \
231
+ torch.einsum('bd,dn->bn', z_flattened, torch.einsum('n d -> d n', embedding))
232
+
233
+ min_encoding_indices = torch.argmin(d, dim=1)
234
+ z_q = embedding[min_encoding_indices].view(z.shape)
235
+ perplexity = None
236
+ min_encodings = None
237
+ vq_loss = None
238
+ commit_loss = None
239
+ entropy_loss = None
240
+ codebook_usage = 0
241
+
242
+ if self.show_usage and self.training:
243
+ cur_len = min_encoding_indices.shape[0]
244
+ self.codebook_used[:-cur_len] = self.codebook_used[cur_len:].clone()
245
+ self.codebook_used[-cur_len:] = min_encoding_indices
246
+ codebook_usage = len(torch.unique(self.codebook_used)) / self.n_e
247
+
248
+ # compute loss for embedding
249
+ if self.training:
250
+ vq_loss = torch.mean((z_q - z.detach()) ** 2)
251
+ commit_loss = self.beta * torch.mean((z_q.detach() - z) ** 2)
252
+ entropy_loss = self.entropy_loss_ratio * compute_entropy_loss(-d)
253
+
254
+ # preserve gradients
255
+ z_q = z + (z_q - z).detach()
256
+
257
+ # reshape back to match original input shape
258
+ z_q = torch.einsum('b h w c -> b c h w', z_q)
259
+
260
+ return z_q, (vq_loss, commit_loss, entropy_loss, codebook_usage), (perplexity, min_encodings, min_encoding_indices)
261
+
262
+ def get_codebook_entry(self, indices, shape=None, channel_first=True):
263
+ # shape = (batch, channel, height, width) if channel_first else (batch, height, width, channel)
264
+ if self.l2_norm:
265
+ embedding = F.normalize(self.embedding.weight, p=2, dim=-1)
266
+ else:
267
+ embedding = self.embedding.weight
268
+ z_q = embedding[indices] # (b*h*w, c)
269
+
270
+ if shape is not None:
271
+ if channel_first:
272
+ z_q = z_q.reshape(shape[0], shape[2], shape[3], shape[1])
273
+ # reshape back to match original input shape
274
+ z_q = z_q.permute(0, 3, 1, 2).contiguous()
275
+ else:
276
+ z_q = z_q.view(shape)
277
+ return z_q
278
+
279
+
280
+ class ResnetBlock(nn.Module):
281
+ def __init__(self, in_channels, out_channels=None, conv_shortcut=False, dropout=0.0, norm_type='group'):
282
+ super().__init__()
283
+ self.in_channels = in_channels
284
+ out_channels = in_channels if out_channels is None else out_channels
285
+ self.out_channels = out_channels
286
+ self.use_conv_shortcut = conv_shortcut
287
+
288
+ self.norm1 = Normalize(in_channels, norm_type)
289
+ self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
290
+ self.norm2 = Normalize(out_channels, norm_type)
291
+ self.dropout = nn.Dropout(dropout)
292
+ self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
293
+
294
+ if self.in_channels != self.out_channels:
295
+ if self.use_conv_shortcut:
296
+ self.conv_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
297
+ else:
298
+ self.nin_shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
299
+
300
+ def forward(self, x):
301
+ h = x
302
+ h = self.norm1(h)
303
+ h = nonlinearity(h)
304
+ h = self.conv1(h)
305
+ h = self.norm2(h)
306
+ h = nonlinearity(h)
307
+ h = self.dropout(h)
308
+ h = self.conv2(h)
309
+
310
+ if self.in_channels != self.out_channels:
311
+ if self.use_conv_shortcut:
312
+ x = self.conv_shortcut(x)
313
+ else:
314
+ x = self.nin_shortcut(x)
315
+ return x+h
316
+
317
+
318
+ class AttnBlock(nn.Module):
319
+ def __init__(self, in_channels, norm_type='group'):
320
+ super().__init__()
321
+ self.norm = Normalize(in_channels, norm_type)
322
+ self.q = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
323
+ self.k = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
324
+ self.v = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
325
+ self.proj_out = nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
326
+
327
+
328
+ def forward(self, x):
329
+ h_ = x
330
+ h_ = self.norm(h_)
331
+ q = self.q(h_)
332
+ k = self.k(h_)
333
+ v = self.v(h_)
334
+
335
+ # compute attention
336
+ b,c,h,w = q.shape
337
+ q = q.reshape(b,c,h*w)
338
+ q = q.permute(0,2,1) # b,hw,c
339
+ k = k.reshape(b,c,h*w) # b,c,hw
340
+ w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
341
+ w_ = w_ * (int(c)**(-0.5))
342
+ w_ = F.softmax(w_, dim=2)
343
+
344
+ # attend to values
345
+ v = v.reshape(b,c,h*w)
346
+ w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q)
347
+ h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
348
+ h_ = h_.reshape(b,c,h,w)
349
+
350
+ h_ = self.proj_out(h_)
351
+
352
+ return x+h_
353
+
354
+
355
+ def nonlinearity(x):
356
+ # swish
357
+ return x*torch.sigmoid(x)
358
+
359
+
360
+ def Normalize(in_channels, norm_type='group'):
361
+ assert norm_type in ['group', 'batch']
362
+ if norm_type == 'group':
363
+ return nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
364
+ elif norm_type == 'batch':
365
+ return nn.SyncBatchNorm(in_channels)
366
+
367
+
368
+ class Upsample(nn.Module):
369
+ def __init__(self, in_channels, with_conv):
370
+ super().__init__()
371
+ self.with_conv = with_conv
372
+ if self.with_conv:
373
+ self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
374
+
375
+ def forward(self, x):
376
+ x = F.interpolate(x, scale_factor=2.0, mode="nearest")
377
+ if self.with_conv:
378
+ x = self.conv(x)
379
+ return x
380
+
381
+
382
+ class Downsample(nn.Module):
383
+ def __init__(self, in_channels, with_conv):
384
+ super().__init__()
385
+ self.with_conv = with_conv
386
+ if self.with_conv:
387
+ # no asymmetric padding in torch conv, must do it ourselves
388
+ self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
389
+
390
+ def forward(self, x):
391
+ if self.with_conv:
392
+ pad = (0,1,0,1)
393
+ x = F.pad(x, pad, mode="constant", value=0)
394
+ x = self.conv(x)
395
+ else:
396
+ x = F.avg_pool2d(x, kernel_size=2, stride=2)
397
+ return x
398
+
399
+
400
+ def compute_entropy_loss(affinity, loss_type="softmax", temperature=0.01):
401
+ flat_affinity = affinity.reshape(-1, affinity.shape[-1])
402
+ flat_affinity /= temperature
403
+ probs = F.softmax(flat_affinity, dim=-1)
404
+ log_probs = F.log_softmax(flat_affinity + 1e-5, dim=-1)
405
+ if loss_type == "softmax":
406
+ target_probs = probs
407
+ else:
408
+ raise ValueError("Entropy loss {} not supported".format(loss_type))
409
+ avg_probs = torch.mean(target_probs, dim=0)
410
+ avg_entropy = - torch.sum(avg_probs * torch.log(avg_probs + 1e-5))
411
+ sample_entropy = - torch.mean(torch.sum(target_probs * log_probs, dim=-1))
412
+ loss = sample_entropy - avg_entropy
413
+ return loss
414
+
415
+
416
+ #################################################################################
417
+ # VQ Model Configs #
418
+ #################################################################################
419
+ def VQ_8(**kwargs):
420
+ return VQModel(ModelArgs(encoder_ch_mult=[1, 2, 2, 4], decoder_ch_mult=[1, 2, 2, 4], **kwargs))
421
+
422
+ def VQ_16(**kwargs):
423
+ return VQModel(ModelArgs(encoder_ch_mult=[1, 1, 2, 2, 4], decoder_ch_mult=[1, 1, 2, 2, 4], **kwargs))
424
+
425
+ VQ_models = {'VQ-16': VQ_16, 'VQ-8': VQ_8}
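The `ch_mult` lists determine the downsampling factor: VQ-16 has five stages and four downsamples, so a 256x256 image becomes a 16x16 grid of indices into the 16384-entry codebook. A round-trip sketch with randomly initialized weights (illustrative only; no released checkpoint is assumed):

```python
# Illustrative round trip through VQ-16: 256x256 pixels -> 256 code indices -> 256x256 image.
import torch
from tokenizer.tokenizer_image.vq_model import VQ_models

vq = VQ_models["VQ-16"]().eval()
x = torch.randn(1, 3, 256, 256)                    # stand-in image in [-1, 1]
with torch.no_grad():
    quant, _, (_, _, indices) = vq.encode(x)       # quant: (1, 8, 16, 16), indices: (256,)
    recon = vq.decode_code(indices, shape=quant.shape)
print(quant.shape, indices.shape, recon.shape)     # (1, 8, 16, 16) (256,) (1, 3, 256, 256)
```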
tokenizer/vae/README.md ADDED
@@ -0,0 +1,14 @@
+ ## VAE Models from Stable Diffusion
+
+ ### install
+ ```
+ pip install diffusers
+ pip install accelerate
+ ```
+
+ ### demo
+ ```
+ cd ${THIS_REPO_ROOT}
+ python3 tokenizer/vae/sd_vae_demo.py
+ ```
+
tokenizer/vae/reconstruction_vae_ddp.py ADDED
@@ -0,0 +1,210 @@
1
+ import torch
2
+ torch.backends.cuda.matmul.allow_tf32 = True
3
+ torch.backends.cudnn.allow_tf32 = True
4
+ import torch.distributed as dist
5
+ from torch.utils.data import Dataset, DataLoader
6
+ from torch.utils.data.distributed import DistributedSampler
7
+ from torchvision.datasets import ImageFolder
8
+ from torchvision import transforms
9
+ from tqdm import tqdm
10
+ import os
11
+ import itertools
12
+ from PIL import Image
13
+ import numpy as np
14
+ import argparse
15
+ import random
16
+
17
+ from skimage.metrics import peak_signal_noise_ratio as psnr_loss
18
+ from skimage.metrics import structural_similarity as ssim_loss
19
+ from diffusers.models import AutoencoderKL
20
+
21
+
22
+ class SingleFolderDataset(Dataset):
23
+ def __init__(self, directory, transform=None):
24
+ super().__init__()
25
+ self.directory = directory
26
+ self.transform = transform
27
+ self.image_paths = [os.path.join(directory, file_name) for file_name in os.listdir(directory)
28
+ if os.path.isfile(os.path.join(directory, file_name))]
29
+
30
+ def __len__(self):
31
+ return len(self.image_paths)
32
+
33
+ def __getitem__(self, idx):
34
+ image_path = self.image_paths[idx]
35
+ image = Image.open(image_path).convert('RGB')
36
+ if self.transform:
37
+ image = self.transform(image)
38
+ return image, torch.tensor(0)
39
+
40
+
41
+ def create_npz_from_sample_folder(sample_dir, num=50_000):
42
+ """
43
+ Builds a single .npz file from a folder of .png samples.
44
+ """
45
+ samples = []
46
+ for i in tqdm(range(num), desc="Building .npz file from samples"):
47
+ sample_pil = Image.open(f"{sample_dir}/{i:06d}.png")
48
+ sample_np = np.asarray(sample_pil).astype(np.uint8)
49
+ samples.append(sample_np)
50
+
51
+ random.shuffle(samples) # This is very important for IS(Inception Score) !!!
52
+ samples = np.stack(samples)
53
+ assert samples.shape == (num, samples.shape[1], samples.shape[2], 3)
54
+ npz_path = f"{sample_dir}.npz"
55
+ np.savez(npz_path, arr_0=samples)
56
+ print(f"Saved .npz file to {npz_path} [shape={samples.shape}].")
57
+ return npz_path
58
+
59
+
60
+ def center_crop_arr(pil_image, image_size):
61
+ """
62
+ Center cropping implementation from ADM.
63
+ https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
64
+ """
65
+ while min(*pil_image.size) >= 2 * image_size:
66
+ pil_image = pil_image.resize(
67
+ tuple(x // 2 for x in pil_image.size), resample=Image.BOX
68
+ )
69
+
70
+ scale = image_size / min(*pil_image.size)
71
+ pil_image = pil_image.resize(
72
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
73
+ )
74
+
75
+ arr = np.array(pil_image)
76
+ crop_y = (arr.shape[0] - image_size) // 2
77
+ crop_x = (arr.shape[1] - image_size) // 2
78
+ return Image.fromarray(arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size])
79
+
80
+
81
+ def main(args):
82
+ # Setup PyTorch:
83
+ assert torch.cuda.is_available(), "Sampling with DDP requires at least one GPU. sample.py supports CPU-only usage"
84
+ torch.set_grad_enabled(False)
85
+
86
+ # Setup DDP:
87
+ dist.init_process_group("nccl")
88
+ rank = dist.get_rank()
89
+ device = rank % torch.cuda.device_count()
90
+ seed = args.global_seed * dist.get_world_size() + rank
91
+ torch.manual_seed(seed)
92
+ torch.cuda.set_device(device)
93
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
94
+
95
+ # load vae
96
+ vae = AutoencoderKL.from_pretrained(f"stabilityai/{args.vae}").to(device)
97
+
98
+ # Create folder to save samples:
99
+ folder_name = f"stabilityai-{args.vae}-{args.dataset}-size-{args.image_size}-seed-{args.global_seed}"
100
+ sample_folder_dir = f"{args.sample_dir}/{folder_name}"
101
+ if rank == 0:
102
+ os.makedirs(sample_folder_dir, exist_ok=True)
103
+ print(f"Saving .png samples at {sample_folder_dir}")
104
+ dist.barrier()
105
+
106
+ # Setup data:
107
+ transform = transforms.Compose([
108
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, args.image_size)),
109
+ transforms.ToTensor(),
110
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
111
+ ])
112
+ if args.dataset == 'imagenet':
113
+ dataset = ImageFolder(args.data_path, transform=transform)
114
+ num_fid_samples = 50000
115
+ elif args.dataset == 'coco':
116
+ dataset = SingleFolderDataset(args.data_path, transform=transform)
117
+ num_fid_samples = 5000
118
+ else:
119
+ raise Exception("please check dataset")
120
+
121
+ sampler = DistributedSampler(
122
+ dataset,
123
+ num_replicas=dist.get_world_size(),
124
+ rank=rank,
125
+ shuffle=False,
126
+ seed=args.global_seed
127
+ )
128
+ loader = DataLoader(
129
+ dataset,
130
+ batch_size=args.per_proc_batch_size,
131
+ shuffle=False,
132
+ sampler=sampler,
133
+ num_workers=args.num_workers,
134
+ pin_memory=True,
135
+ drop_last=False
136
+ )
137
+
138
+ # Figure out how many samples we need to generate on each GPU and how many iterations we need to run:
139
+ n = args.per_proc_batch_size
140
+ global_batch_size = n * dist.get_world_size()
141
+
142
+ psnr_val_rgb = []
143
+ ssim_val_rgb = []
144
+ loader = tqdm(loader) if rank == 0 else loader
145
+ total = 0
146
+ for x, _ in loader:
147
+ rgb_gts = x
148
+ rgb_gts = (rgb_gts.permute(0, 2, 3, 1).to("cpu").numpy() + 1.0) / 2.0 # rgb_gt value is between [0, 1]
149
+ x = x.to(device)
150
+ with torch.no_grad():
151
+ # Map input images to latent space + normalize latents:
152
+ latent = vae.encode(x).latent_dist.sample().mul_(0.18215)
153
+ # reconstruct:
154
+ samples = vae.decode(latent / 0.18215).sample # output value is between [-1, 1]
155
+ samples = torch.clamp(127.5 * samples + 128.0, 0, 255).permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy()
156
+
157
+ # Save samples to disk as individual .png files
158
+ for i, (sample, rgb_gt) in enumerate(zip(samples, rgb_gts)):
159
+ index = i * dist.get_world_size() + rank + total
160
+ Image.fromarray(sample).save(f"{sample_folder_dir}/{index:06d}.png")
161
+ # metric
162
+ rgb_restored = sample.astype(np.float32) / 255. # rgb_restored value is between [0, 1]
163
+ psnr = psnr_loss(rgb_restored, rgb_gt)
164
+ ssim = ssim_loss(rgb_restored, rgb_gt, multichannel=True, data_range=2.0, channel_axis=-1)
165
+ psnr_val_rgb.append(psnr)
166
+ ssim_val_rgb.append(ssim)
167
+ total += global_batch_size
168
+
169
+ # ------------------------------------
170
+ # Summary
171
+ # ------------------------------------
172
+ # Make sure all processes have finished saving their samples
173
+ dist.barrier()
174
+ world_size = dist.get_world_size()
175
+ gather_psnr_val = [None for _ in range(world_size)]
176
+ gather_ssim_val = [None for _ in range(world_size)]
177
+ dist.all_gather_object(gather_psnr_val, psnr_val_rgb)
178
+ dist.all_gather_object(gather_ssim_val, ssim_val_rgb)
179
+
180
+ if rank == 0:
181
+ gather_psnr_val = list(itertools.chain(*gather_psnr_val))
182
+ gather_ssim_val = list(itertools.chain(*gather_ssim_val))
183
+ psnr_val_rgb = sum(gather_psnr_val) / len(gather_psnr_val)
184
+ ssim_val_rgb = sum(gather_ssim_val) / len(gather_ssim_val)
185
+ print("PSNR: %f, SSIM: %f " % (psnr_val_rgb, ssim_val_rgb))
186
+
187
+ result_file = f"{sample_folder_dir}_results.txt"
188
+ print("writing results to {}".format(result_file))
189
+ with open(result_file, 'w') as f:
190
+ print("PSNR: %f, SSIM: %f " % (psnr_val_rgb, ssim_val_rgb), file=f)
191
+
192
+ create_npz_from_sample_folder(sample_folder_dir, num_fid_samples)
193
+ print("Done.")
194
+
195
+ dist.barrier()
196
+ dist.destroy_process_group()
197
+
198
+
199
+ if __name__ == "__main__":
200
+ parser = argparse.ArgumentParser()
201
+ parser.add_argument("--data-path", type=str, required=True)
202
+ parser.add_argument("--dataset", type=str, choices=['imagenet', 'coco'], default='imagenet')
203
+ parser.add_argument("--vae", type=str, choices=["sdxl-vae", "sd-vae-ft-mse"], default="sd-vae-ft-mse")
204
+ parser.add_argument("--image-size", type=int, choices=[256, 512], default=256)
205
+ parser.add_argument("--sample-dir", type=str, default="reconstructions")
206
+ parser.add_argument("--per-proc-batch-size", type=int, default=32)
207
+ parser.add_argument("--global-seed", type=int, default=0)
208
+ parser.add_argument("--num-workers", type=int, default=4)
209
+ args = parser.parse_args()
210
+ main(args)
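
Note on the file naming above: the interleaved index, index = i * world_size + rank + total, is what keeps ranks from overwriting each other's .png files. A standalone sketch of the arithmetic (world size, batch size and batch count are arbitrary illustration values, not taken from the script):

world_size, per_proc_batch_size, num_batches = 4, 3, 2   # illustration values only
seen = set()
for rank in range(world_size):
    total = 0
    for _ in range(num_batches):
        for i in range(per_proc_batch_size):
            index = i * world_size + rank + total
            assert index not in seen   # every rank writes to a distinct file name
            seen.add(index)
        total += per_proc_batch_size * world_size   # global_batch_size
print(sorted(seen) == list(range(len(seen))))        # True: contiguous numbering
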
tokenizer/vae/sd_vae_demo.py ADDED
@@ -0,0 +1,57 @@
1
+ import argparse
2
+ import torch
3
+ import torch.nn.functional as F
4
+ import numpy as np
5
+ from PIL import Image
6
+ from diffusers.models import AutoencoderKL
7
+
8
+
9
+ def main(args):
10
+ # Setup PyTorch:
11
+ torch.manual_seed(args.seed)
12
+ torch.set_grad_enabled(False)
13
+ device = "cuda" if torch.cuda.is_available() else "cpu"
14
+
15
+ # create and load model
16
+ vae = AutoencoderKL.from_pretrained(f"stabilityai/{args.vae}").to(device)
17
+
18
+ # load image
19
+ img_path = args.image_path
20
+ out_path = args.image_path.replace('.jpg', '_vae.jpg').replace('.jpeg', '_vae.jpeg').replace('.png', '_vae.png')
21
+ input_size = args.image_size
22
+ img = Image.open(img_path).convert("RGB")
23
+
24
+ # preprocess
25
+ size_org = img.size
26
+ img = img.resize((input_size, input_size))
27
+ img = np.array(img) / 255.
28
+ x = 2.0 * img - 1.0 # x value is between [-1, 1]
29
+ x = torch.tensor(x)
30
+ x = x.unsqueeze(dim=0)
31
+ x = torch.einsum('nhwc->nchw', x)
32
+ x_input = x.float().to(device)
33
+
34
+ # inference
35
+ with torch.no_grad():
36
+ # Map input images to latent space + normalize latents:
37
+ latent = vae.encode(x_input).latent_dist.sample().mul_(0.18215)
38
+ # reconstruct:
39
+ output = vae.decode(latent / 0.18215).sample # output value is between [-1, 1]
40
+
41
+ # postprocess
42
+ output = F.interpolate(output, size=[size_org[1], size_org[0]], mode='bilinear').permute(0, 2, 3, 1)[0]
43
+ sample = torch.clamp(127.5 * output + 128.0, 0, 255).to("cpu", dtype=torch.uint8).numpy()
44
+
45
+ # save
46
+ Image.fromarray(sample).save(out_path)
47
+ print("Reconstructed image is saved to {}".format(out_path))
48
+
49
+
50
+ if __name__ == "__main__":
51
+ parser = argparse.ArgumentParser()
52
+ parser.add_argument("--image-path", type=str, default="assets/example.jpg")
53
+ parser.add_argument("--vae", type=str, choices=["sdxl-vae", "sd-vae-ft-mse"], default="sd-vae-ft-mse")
54
+ parser.add_argument("--image-size", type=int, choices=[256, 512, 1024], default=512)
55
+ parser.add_argument("--seed", type=int, default=0)
56
+ args = parser.parse_args()
57
+ main(args)
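
The demo above boils down to a scaled encode/decode round trip through the SD VAE. A minimal sketch of that core step on a random tensor, assuming only that diffusers is installed (no image I/O, no resizing):

import torch
from diffusers.models import AutoencoderKL

device = "cuda" if torch.cuda.is_available() else "cpu"
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device)

x = torch.rand(1, 3, 256, 256, device=device) * 2.0 - 1.0     # dummy image in [-1, 1]
with torch.no_grad():
    latent = vae.encode(x).latent_dist.sample().mul_(0.18215)  # normalized latents
    recon = vae.decode(latent / 0.18215).sample                # back to [-1, 1]
print(latent.shape, recon.shape)   # latents are spatially 8x smaller, e.g. (1, 4, 32, 32)
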
tokenizer/validation/val_ddp.py ADDED
@@ -0,0 +1,165 @@
1
+ import torch
2
+ torch.backends.cuda.matmul.allow_tf32 = True
3
+ torch.backends.cudnn.allow_tf32 = True
4
+ import torch.distributed as dist
5
+ from torch.utils.data import Dataset, DataLoader
6
+ from torch.utils.data.distributed import DistributedSampler
7
+ from torchvision.datasets import ImageFolder
8
+ from torchvision import transforms
9
+ from tqdm import tqdm
10
+ import os
11
+ from PIL import Image
12
+ import numpy as np
13
+ import argparse
14
+ import random
15
+
16
+
17
+ class SingleFolderDataset(Dataset):
18
+ def __init__(self, directory, transform=None):
19
+ super().__init__()
20
+ self.directory = directory
21
+ self.transform = transform
22
+ self.image_paths = [os.path.join(directory, file_name) for file_name in os.listdir(directory)
23
+ if os.path.isfile(os.path.join(directory, file_name))]
24
+
25
+ def __len__(self):
26
+ return len(self.image_paths)
27
+
28
+ def __getitem__(self, idx):
29
+ image_path = self.image_paths[idx]
30
+ image = Image.open(image_path).convert('RGB')
31
+ if self.transform:
32
+ image = self.transform(image)
33
+ return image, torch.tensor(0)
34
+
35
+
36
+ def create_npz_from_sample_folder(sample_dir, num=50_000):
37
+ """
38
+ Builds a single .npz file from a folder of .png samples.
39
+ """
40
+ samples = []
41
+ for i in tqdm(range(num), desc="Building .npz file from samples"):
42
+ sample_pil = Image.open(f"{sample_dir}/{i:06d}.png")
43
+ sample_np = np.asarray(sample_pil).astype(np.uint8)
44
+ samples.append(sample_np)
45
+
46
+ random.shuffle(samples) # This is very important for IS(Inception Score) !!!
47
+ samples = np.stack(samples)
48
+ assert samples.shape == (num, samples.shape[1], samples.shape[2], 3)
49
+ npz_path = f"{sample_dir}.npz"
50
+ np.savez(npz_path, arr_0=samples)
51
+ print(f"Saved .npz file to {npz_path} [shape={samples.shape}].")
52
+ return npz_path
53
+
54
+
55
+ def center_crop_arr(pil_image, image_size):
56
+ """
57
+ Center cropping implementation from ADM.
58
+ https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
59
+ """
60
+ while min(*pil_image.size) >= 2 * image_size:
61
+ pil_image = pil_image.resize(
62
+ tuple(x // 2 for x in pil_image.size), resample=Image.BOX
63
+ )
64
+
65
+ scale = image_size / min(*pil_image.size)
66
+ pil_image = pil_image.resize(
67
+ tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
68
+ )
69
+
70
+ arr = np.array(pil_image)
71
+ crop_y = (arr.shape[0] - image_size) // 2
72
+ crop_x = (arr.shape[1] - image_size) // 2
73
+ return Image.fromarray(arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size])
74
+
75
+
76
+ def main(args):
77
+ # Setup PyTorch:
78
+ assert torch.cuda.is_available(), "Validation with DDP requires at least one GPU"
79
+ torch.set_grad_enabled(False)
80
+
81
+ # Setup env
82
+ dist.init_process_group("nccl")
83
+ rank = dist.get_rank()
84
+ device = rank % torch.cuda.device_count()
85
+ seed = args.global_seed * dist.get_world_size() + rank
86
+ torch.manual_seed(seed)
87
+ torch.cuda.set_device(device)
88
+ print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
89
+
90
+ # Create folder to save samples:
91
+ folder_name = f"val_{args.dataset}"
92
+ sample_folder_dir = f"{args.sample_dir}/{folder_name}"
93
+ if rank == 0:
94
+ os.makedirs(sample_folder_dir, exist_ok=True)
95
+ print(f"Saving .png samples at {sample_folder_dir}")
96
+ dist.barrier()
97
+
98
+ # Setup data:
99
+ transform = transforms.Compose([
100
+ transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, args.image_size)),
101
+ transforms.ToTensor(),
102
+ transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
103
+ ])
104
+
105
+ if args.dataset == 'imagenet':
106
+ dataset = ImageFolder(args.data_path, transform=transform)
107
+ num_fid_samples = 50000
108
+ elif args.dataset == 'coco':
109
+ dataset = SingleFolderDataset(args.data_path, transform=transform)
110
+ num_fid_samples = 5000
111
+ else:
112
+ raise ValueError(f"unsupported dataset: {args.dataset}")
113
+
114
+ sampler = DistributedSampler(
115
+ dataset,
116
+ num_replicas=dist.get_world_size(),
117
+ rank=rank,
118
+ shuffle=False,
119
+ seed=args.global_seed
120
+ )
121
+ loader = DataLoader(
122
+ dataset,
123
+ batch_size=args.per_proc_batch_size,
124
+ shuffle=False,
125
+ sampler=sampler,
126
+ num_workers=args.num_workers,
127
+ pin_memory=True,
128
+ drop_last=False
129
+ )
130
+
131
+ # Figure out how many samples we need to generate on each GPU and how many iterations we need to run:
132
+ n = args.per_proc_batch_size
133
+ global_batch_size = n * dist.get_world_size()
134
+
135
+ loader = tqdm(loader) if rank == 0 else loader
136
+ total = 0
137
+ for x, _ in loader:
138
+ samples = torch.clamp(127.5 * x + 128.0, 0, 255).permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy()
139
+ # Save samples to disk as individual .png files
140
+ for i, sample in enumerate(samples):
141
+ index = i * dist.get_world_size() + rank + total
142
+ Image.fromarray(sample).save(f"{sample_folder_dir}/{index:06d}.png")
143
+
144
+ total += global_batch_size
145
+
146
+ # Make sure all processes have finished saving their samples before attempting to convert to .npz
147
+ dist.barrier()
148
+ if rank == 0:
149
+ create_npz_from_sample_folder(sample_folder_dir, num_fid_samples)
150
+ print("Done.")
151
+ dist.barrier()
152
+ dist.destroy_process_group()
153
+
154
+
155
+ if __name__ == "__main__":
156
+ parser = argparse.ArgumentParser()
157
+ parser.add_argument("--data-path", type=str, required=True)
158
+ parser.add_argument("--dataset", type=str, choices=['imagenet', 'coco'], default='imagenet')
159
+ parser.add_argument("--image-size", type=int, choices=[256, 512], default=256)
160
+ parser.add_argument("--sample-dir", type=str, default="reconstructions")
161
+ parser.add_argument("--per-proc-batch-size", type=int, default=32)
162
+ parser.add_argument("--global-seed", type=int, default=0)
163
+ parser.add_argument("--num-workers", type=int, default=4)
164
+ args = parser.parse_args()
165
+ main(args)
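
center_crop_arr above follows the ADM recipe: halve the image with box filtering while the short side is at least twice the target, then bicubic-resize so the short side matches and center-crop the long side. A quick sketch of the resulting size, assuming the function above is in scope:

import numpy as np
from PIL import Image

img = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))  # 640x480 dummy image
cropped = center_crop_arr(img, 256)
print(cropped.size)  # (256, 256)
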
tools/openimage_json.py ADDED
@@ -0,0 +1,75 @@
1
+ import argparse
2
+ import os
3
+ import json
4
+ from PIL import Image
5
+ import multiprocessing as mp
6
+
7
+ import warnings
8
+ warnings.filterwarnings('ignore')
9
+
10
+
11
+ def check_image(image_path):
12
+ try:
13
+ Image.open(image_path)
14
+ return True
15
+ except Exception as e:
16
+ print(f"Error details: {str(e)}")
17
+ return False
18
+
19
+
20
+ def check_image_path(image_info):
21
+ data_path, image_path_list = image_info # Unpack the info
22
+ valid_image_paths = []
23
+ for image_path in image_path_list:
24
+ if check_image(os.path.join(data_path, image_path)):
25
+ valid_image_paths.append(image_path)
26
+ return valid_image_paths
27
+
28
+
29
+ def load_image_path(image_info):
30
+ folder_name, data_path, image_extensions = image_info # Unpack the info
31
+ print(folder_name)
32
+
33
+ folder_path = os.path.join(data_path, folder_name)
34
+ local_image_paths = []
35
+ for image_path in os.listdir(folder_path):
36
+ _, file_extension = os.path.splitext(image_path)
37
+ if file_extension.lower() in image_extensions:
38
+ image_path_full = os.path.join(folder_name, image_path)
39
+ local_image_paths.append(image_path_full)
40
+ return local_image_paths
41
+
42
+
43
+
44
+ def main(args):
45
+ data_path = args.data_path
46
+ image_extensions = ['.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.webp']
47
+
48
+ num_processes = 47
49
+ work_list = [('openimages_{:0>4}'.format(idx), data_path, image_extensions) for idx in range(1, 48)]
50
+ with mp.Pool(processes=num_processes) as pool:
51
+ results = pool.map(load_image_path, work_list)
52
+ image_paths = [image_path for sublist in results for image_path in sublist]
53
+ print('image_paths is loaded')
54
+
55
+
56
+ num_processes = max(mp.cpu_count() // 2, 4)
57
+ unit = len(image_paths) // num_processes
58
+ work_list = [(data_path, image_paths[idx*unit:] if idx == num_processes - 1 else image_paths[idx*unit:(idx+1)*unit]) for idx in range(num_processes)]  # last chunk takes the remainder
59
+ with mp.Pool(processes=num_processes) as pool:
60
+ results = pool.map(check_image_path, work_list)
61
+ valid_image_paths = [image_path for sublist in results for image_path in sublist]
62
+ print('image_paths is checked')
63
+
64
+
65
+ output_json_file_path = os.path.join(data_path, 'image_paths.json')
66
+ with open(output_json_file_path, 'w') as outfile:
67
+ json.dump(valid_image_paths, outfile, indent=4)
68
+ print(f"Image paths have been saved to {output_json_file_path}")
69
+
70
+
71
+ if __name__ == "__main__":
72
+ parser = argparse.ArgumentParser()
73
+ parser.add_argument("--data-path", type=str, required=True)
74
+ args = parser.parse_args()
75
+ main(args)
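
The script above assumes the OpenImages shards sit in subfolders named openimages_0001 through openimages_0047 under --data-path. A small sanity-check sketch of that layout (the path is a placeholder):

import os

data_path = "/path/to/openimages"   # placeholder path
expected = ['openimages_{:0>4}'.format(idx) for idx in range(1, 48)]
missing = [name for name in expected if not os.path.isdir(os.path.join(data_path, name))]
print(missing if missing else "all 47 shard folders present")
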
tools/push_vae_to_hf.py ADDED
@@ -0,0 +1,48 @@
1
+ """
2
+ Script to push and load custom PyTorch models to/from the Hugging Face Hub.
3
+ """
4
+
5
+ import argparse
6
+ import torch
7
+ from tokenizer.tokenizer_image.vq_model_hf import VQ_models_HF, VQModelHF
8
+
9
+ from huggingface_hub import hf_hub_download
10
+
11
+
12
+ model2ckpt = {
13
+ "GPT-XL": ("vq_ds16_c2i.pt", "c2i_XL_384.pt", 384),
14
+ "GPT-B": ("vq_ds16_c2i.pt", "c2i_B_256.pt", 256),
15
+ }
16
+
17
+ def load_model(args):
18
+ ckpt_folder = "./"
19
+ vq_ckpt, gpt_ckpt, _ = model2ckpt[args.gpt_model]
20
+ hf_hub_download(repo_id="FoundationVision/LlamaGen", filename=vq_ckpt, local_dir=ckpt_folder)
21
+ hf_hub_download(repo_id="FoundationVision/LlamaGen", filename=gpt_ckpt, local_dir=ckpt_folder)
22
+ # create and load model
23
+ vq_model = VQ_models_HF[args.vq_model](
24
+ codebook_size=args.codebook_size,
25
+ codebook_embed_dim=args.codebook_embed_dim)
26
+ vq_model.eval()
27
+ checkpoint = torch.load(f"{ckpt_folder}{vq_ckpt}", map_location="cpu")
28
+ vq_model.load_state_dict(checkpoint["model"])
29
+ del checkpoint
30
+ print(f"image tokenizer is loaded")
31
+ return vq_model
32
+
33
+
34
+ parser = argparse.ArgumentParser()
35
+ parser.add_argument("--gpt-model", type=str, default="GPT-XL")
36
+ parser.add_argument("--vq-model", type=str, choices=list(VQ_models_HF.keys()), default="VQ-16")
37
+ parser.add_argument("--codebook-size", type=int, default=16384, help="codebook size for vector quantization")
38
+ parser.add_argument("--codebook-embed-dim", type=int, default=8, help="codebook dimension for vector quantization")
39
+ args = parser.parse_args()
40
+
41
+ # load weights
42
+ vq_model = load_model(args)
43
+
44
+ # push to hub
45
+ vq_model.push_to_hub("FoundationVision/vq-ds16-c2i")
46
+
47
+ # reload
48
+ model = VQModelHF.from_pretrained("FoundationVision/vq-ds16-c2i")
utils/__init__.py ADDED
File without changes
utils/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (156 Bytes). View file
 
utils/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (140 Bytes). View file
 
utils/__pycache__/distributed.cpython-310.pyc ADDED
Binary file (1.81 kB). View file
 
utils/__pycache__/distributed.cpython-38.pyc ADDED
Binary file (1.79 kB). View file
 
utils/deepspeed.py ADDED
@@ -0,0 +1,87 @@
1
+ def create_deepspeed_config(args):
2
+ ds_config = {
3
+ "steps_per_print": 1000,
4
+ "train_batch_size": args.global_batch_size,
5
+ "gradient_accumulation_steps": args.gradient_accumulation_steps,
6
+ # "train_micro_batch_size_per_gpu": args.batch_size, # determined by (train_batch_size, gradient_accumulation_steps)
7
+ "optimizer": {
8
+ "type": "Adam",
9
+ "adam_w_mode": True,
10
+ "params": {
11
+ "lr": args.lr,
12
+ "weight_decay": args.weight_decay,
13
+ "bias_correction": True,
14
+ "betas": [
15
+ args.beta1,
16
+ args.beta2
17
+ ],
18
+ }
19
+ },
20
+ "fp16": {
21
+ "enabled": args.mixed_precision == 'fp16',
22
+ "loss_scale": 0,
23
+ "initial_scale_power": 16,
24
+ "loss_scale_window": 1000,
25
+ "hysteresis": 2,
26
+ "min_loss_scale": 1
27
+ },
28
+ "bf16": {
29
+ "enabled": args.mixed_precision == 'bf16',
30
+ },
31
+ # "flops_profiler": {
32
+ # "enabled": True,
33
+ # "profile_step": -1,
34
+ # "module_depth": -1,
35
+ # "top_modules": 1,
36
+ # "detailed": True,
37
+ # },
38
+ "zero_allow_untested_optimizer": True
39
+ }
40
+
41
+ if args.clip_grad is not None:
42
+ ds_config.update({'gradient_clipping': args.clip_grad})
43
+
44
+ if args.zero_stage == 0:
45
+ ds_config.update({"zero_optimization":
46
+ {
47
+ "stage": args.zero_stage,
48
+ "contiguous_gradients": True,
49
+ "overlap_comm": True,
50
+ }
51
+ })
52
+ elif args.zero_stage == 1:
53
+ ds_config.update({"zero_optimization":
54
+ {
55
+ "stage": args.zero_stage,
56
+ "contiguous_gradients": True,
57
+ "overlap_comm": True,
58
+ "reduce_bucket_size": 5e8,
59
+ }
60
+ })
61
+ elif args.zero_stage == 2:
62
+ ds_config.update({"zero_optimization":
63
+ {
64
+ "stage": args.zero_stage,
65
+ "contiguous_gradients": True,
66
+ "overlap_comm": True,
67
+ "reduce_scatter": True,
68
+ "reduce_bucket_size": 5e8,
69
+ "allgather_bucket_size": 5e8,
70
+ }
71
+ })
72
+ elif args.zero_stage == 3:
73
+ ds_config.update({"zero_optimization":
74
+ {
75
+ "stage": args.zero_stage,
76
+ "contiguous_gradients": True,
77
+ "overlap_comm": True,
78
+ "reduce_bucket_size": 5e8,
79
+ "stage3_prefetch_bucket_size": 5e8,
80
+ "stage3_param_persistence_threshold": 1e6,
81
+ "stage3_max_live_parameters": 1e9,
82
+ "stage3_max_reuse_distance": 1e9,
83
+ "stage3_gather_16bit_weights_on_model_save": True
84
+ }
85
+ })
86
+
87
+ return ds_config
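
create_deepspeed_config only reads a handful of attributes from args, so it can be exercised standalone. A sketch with an argparse.Namespace standing in for the real training arguments (the values are placeholders, not defaults from this repo):

import argparse
import json

args = argparse.Namespace(
    global_batch_size=256, gradient_accumulation_steps=1,
    lr=1e-4, weight_decay=5e-2, beta1=0.9, beta2=0.95,
    mixed_precision='bf16', clip_grad=1.0, zero_stage=2,
)
ds_config = create_deepspeed_config(args)
print(json.dumps(ds_config, indent=2))   # the dict that would be handed to DeepSpeed
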
utils/distributed.py ADDED
@@ -0,0 +1,58 @@
1
+ import os
2
+ import torch
3
+ import subprocess
4
+
5
+
6
+ def setup_for_distributed(is_master):
7
+ """
8
+ This function disables printing when not in master process
9
+ """
10
+ import builtins as __builtin__
11
+ builtin_print = __builtin__.print
12
+
13
+ def print(*args, **kwargs):
14
+ force = kwargs.pop('force', False)
15
+ if is_master or force:
16
+ builtin_print(*args, **kwargs)
17
+
18
+ __builtin__.print = print
19
+
20
+ def init_distributed_mode(args):
21
+ if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
22
+ args.rank = int(os.environ["RANK"])
23
+ args.world_size = int(os.environ['WORLD_SIZE'])
24
+ args.gpu = int(os.environ['LOCAL_RANK'])
25
+ args.dist_url = 'env://'
26
+ os.environ['LOCAL_SIZE'] = str(torch.cuda.device_count())
27
+ elif 'SLURM_PROCID' in os.environ:
28
+ proc_id = int(os.environ['SLURM_PROCID'])
29
+ ntasks = int(os.environ['SLURM_NTASKS'])
30
+ node_list = os.environ['SLURM_NODELIST']
31
+ num_gpus = torch.cuda.device_count()
32
+ addr = subprocess.getoutput(
33
+ 'scontrol show hostname {} | head -n1'.format(node_list))
34
+ os.environ['MASTER_PORT'] = os.environ.get('MASTER_PORT', '29500')
35
+ os.environ['MASTER_ADDR'] = addr
36
+ os.environ['WORLD_SIZE'] = str(ntasks)
37
+ os.environ['RANK'] = str(proc_id)
38
+ os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
39
+ os.environ['LOCAL_SIZE'] = str(num_gpus)
40
+ args.dist_url = 'env://'
41
+ args.world_size = ntasks
42
+ args.rank = proc_id
43
+ args.gpu = proc_id % num_gpus
44
+ else:
45
+ print('Not using distributed mode')
46
+ args.distributed = False
47
+ return
48
+
49
+ args.distributed = True
50
+
51
+ torch.cuda.set_device(args.gpu)
52
+ args.dist_backend = 'nccl'
53
+ print('| distributed init (rank {}): {}'.format(
54
+ args.rank, args.dist_url), flush=True)
55
+ torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
56
+ world_size=args.world_size, rank=args.rank)
57
+ torch.distributed.barrier()
58
+ setup_for_distributed(args.rank == 0)
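
init_distributed_mode mutates the args namespace in place and falls back to single-process mode when no launcher variables are found. A typical-use sketch, assuming the script is started with torchrun (which sets RANK, WORLD_SIZE and LOCAL_RANK):

import argparse

args = argparse.Namespace()
init_distributed_mode(args)   # sets args.rank / args.world_size / args.gpu, or args.distributed = False
if args.distributed:
    print(f"rank {args.rank} of {args.world_size} initialized")
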
utils/drop_path.py ADDED
@@ -0,0 +1,36 @@
1
+ # from timm.models.layers import DropPath
2
+ import torch
3
+
4
+ def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
5
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
6
+
7
+ This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
8
+ the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
9
+ See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
10
+ changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
11
+ 'survival rate' as the argument.
12
+
13
+ """
14
+ if drop_prob == 0. or not training:
15
+ return x
16
+ keep_prob = 1 - drop_prob
17
+ shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
18
+ random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
19
+ if keep_prob > 0.0 and scale_by_keep:
20
+ random_tensor.div_(keep_prob)
21
+ return x * random_tensor
22
+
23
+
24
+ class DropPath(torch.nn.Module):
25
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
26
+ """
27
+ def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
28
+ super(DropPath, self).__init__()
29
+ self.drop_prob = drop_prob
30
+ self.scale_by_keep = scale_by_keep
31
+
32
+ def forward(self, x):
33
+ return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
34
+
35
+ def extra_repr(self):
36
+ return f'drop_prob={round(self.drop_prob,3):0.3f}'
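
DropPath above is the identity in eval mode and, in training mode, zeroes whole samples while rescaling the survivors by 1/keep_prob. A quick behavioral sketch, assuming the class above is in scope:

import torch

dp = DropPath(drop_prob=0.5)
x = torch.ones(8, 16)

dp.eval()
assert torch.equal(dp(x), x)   # no-op at inference time

dp.train()
print(dp(x)[:, 0])             # each row is either 0.0 or 1 / keep_prob = 2.0
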
utils/ema.py ADDED
@@ -0,0 +1,22 @@
1
+ import torch
2
+ from collections import OrderedDict
3
+
4
+ @torch.no_grad()
5
+ def update_ema(ema_model, model, decay=0.9999):
6
+ """
7
+ Step the EMA model towards the current model.
8
+ """
9
+ ema_params = OrderedDict(ema_model.named_parameters())
10
+ model_params = OrderedDict(model.named_parameters())
11
+
12
+ for name, param in model_params.items():
13
+ # TODO: Consider applying only to params that require_grad to avoid small numerical changes of pos_embed
14
+ ema_params[name].mul_(decay).add_(param.data, alpha=1 - decay)
15
+
16
+
17
+ def requires_grad(model, flag=True):
18
+ """
19
+ Set requires_grad flag for all parameters in a model.
20
+ """
21
+ for p in model.parameters():
22
+ p.requires_grad = flag
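
A minimal usage sketch for the two helpers above: keep a frozen EMA copy of a model and step it after each optimizer update (the model and decay here are placeholders):

import copy
import torch

model = torch.nn.Linear(4, 4)
ema_model = copy.deepcopy(model)
requires_grad(ema_model, False)   # EMA weights are never optimized directly

# ... optimizer.step() on `model` would happen here ...
update_ema(ema_model, model, decay=0.9999)
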
utils/logger.py ADDED
@@ -0,0 +1,19 @@
1
+ import logging
2
+ import torch.distributed as dist
3
+
4
+ def create_logger(logging_dir):
5
+ """
6
+ Create a logger that writes to a log file and stdout.
7
+ """
8
+ if dist.get_rank() == 0: # real logger
9
+ logging.basicConfig(
10
+ level=logging.INFO,
11
+ format='[\033[34m%(asctime)s\033[0m] %(message)s',
12
+ datefmt='%Y-%m-%d %H:%M:%S',
13
+ handlers=[logging.StreamHandler(), logging.FileHandler(f"{logging_dir}/log.txt")]
14
+ )
15
+ logger = logging.getLogger(__name__)
16
+ else: # dummy logger (does nothing)
17
+ logger = logging.getLogger(__name__)
18
+ logger.addHandler(logging.NullHandler())
19
+ return logger
utils/video.py ADDED
@@ -0,0 +1,116 @@
1
+ import math
2
+ import numpy as np
3
+ import skvideo.io
4
+ from PIL import Image
5
+
6
+ # Shifts src_tf dim to dest dim
7
+ # i.e. shift_dim(x, 1, -1) would be (b, c, t, h, w) -> (b, t, h, w, c)
8
+ def shift_dim(x, src_dim=-1, dest_dim=-1, make_contiguous=True):
9
+ n_dims = len(x.shape)
10
+ if src_dim < 0:
11
+ src_dim = n_dims + src_dim
12
+ if dest_dim < 0:
13
+ dest_dim = n_dims + dest_dim
14
+
15
+ assert 0 <= src_dim < n_dims and 0 <= dest_dim < n_dims
16
+
17
+ dims = list(range(n_dims))
18
+ del dims[src_dim]
19
+
20
+ permutation = []
21
+ ctr = 0
22
+ for i in range(n_dims):
23
+ if i == dest_dim:
24
+ permutation.append(src_dim)
25
+ else:
26
+ permutation.append(dims[ctr])
27
+ ctr += 1
28
+ x = x.permute(permutation)
29
+ if make_contiguous:
30
+ x = x.contiguous()
31
+ return x
32
+
33
+ # reshapes tensor start from dim i (inclusive)
34
+ # to dim j (exclusive) to the desired shape
35
+ # e.g. if x.shape = (b, thw, c) then
36
+ # view_range(x, 1, 2, (t, h, w)) returns
37
+ # x of shape (b, t, h, w, c)
38
+ def view_range(x, i, j, shape):
39
+ shape = tuple(shape)
40
+
41
+ n_dims = len(x.shape)
42
+ if i < 0:
43
+ i = n_dims + i
44
+
45
+ if j is None:
46
+ j = n_dims
47
+ elif j < 0:
48
+ j = n_dims + j
49
+
50
+ assert 0 <= i < j <= n_dims
51
+
52
+ x_shape = x.shape
53
+ target_shape = x_shape[:i] + shape + x_shape[j:]
54
+ return x.view(target_shape)
55
+
56
+
57
+ def tensor_slice(x, begin, size):
58
+ assert all([b >= 0 for b in begin])
59
+ size = [l - b if s == -1 else s
60
+ for s, b, l in zip(size, begin, x.shape)]
61
+ assert all([s >= 0 for s in size])
62
+
63
+ slices = [slice(b, b + s) for b, s in zip(begin, size)]
64
+ return x[slices]
65
+
66
+
67
+ def save_video_grid(video, fname, nrow=None, fps=5):
68
+ b, c, t, h, w = video.shape
69
+ video = video.permute(0, 2, 3, 4, 1)
70
+ video = (video.cpu().numpy() * 255).astype('uint8')
71
+
72
+ if nrow is None:
73
+ nrow = math.ceil(math.sqrt(b))
74
+ ncol = math.ceil(b / nrow)
75
+ padding = 1
76
+ video_grid = np.zeros((t, (padding + h) * nrow + padding,
77
+ (padding + w) * ncol + padding, c), dtype='uint8')
78
+ for i in range(b):
79
+ r = i // ncol
80
+ c = i % ncol
81
+
82
+ start_r = (padding + h) * r
83
+ start_c = (padding + w) * c
84
+ video_grid[:, start_r:start_r + h, start_c:start_c + w] = video[i]
85
+
86
+ skvideo.io.vwrite(fname, video_grid, inputdict={'-r': '{}'.format(fps)})
87
+
88
+
89
+ def save_gif_grid(video, file_name, nrow=None, fps=5):
90
+ b, c, t, h, w = video.shape
91
+ video = video.permute(0, 2, 3, 4, 1)
92
+ video = (video.cpu().numpy() * 255).astype('uint8')
93
+
94
+ if nrow is None:
95
+ nrow = math.ceil(math.sqrt(b))
96
+ ncol = math.ceil(b / nrow)
97
+ padding = 1
98
+ video_grid = np.zeros((t, (padding + h) * nrow + padding,
99
+ (padding + w) * ncol + padding, c), dtype='uint8')
100
+ for i in range(b):
101
+ r = i // ncol
102
+ c = i % ncol
103
+
104
+ start_r = (padding + h) * r
105
+ start_c = (padding + w) * c
106
+ video_grid[:, start_r:start_r + h, start_c:start_c + w] = video[i]
107
+
108
+ images = []
109
+ for frame in video_grid:
110
+ images.append(Image.fromarray(frame))
111
+
112
+ # Save the first image and append the rest of the images as frames in the GIF
113
+ images[0].save(file_name, save_all=True, append_images=images[1:], optimize=False, duration=int(1000/fps), loop=0)
114
+
115
+ # The 'duration' parameter defines the display time for each frame in milliseconds
116
+ # The 'loop' parameter defines the number of loops the GIF should make (0 for infinite loop)
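
A minimal sketch of save_gif_grid above, assuming a float tensor of shape (b, c, t, h, w) with values in [0, 1]; save_video_grid lays frames out the same way but writes through skvideo, so it additionally needs scikit-video/FFmpeg:

import torch

video = torch.rand(4, 3, 8, 64, 64)       # 4 clips, 3 channels, 8 frames, 64x64
save_gif_grid(video, "grid.gif", fps=5)   # writes a 2x2 animated GIF grid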