parquet-converter committed
Commit f1c2e26 · 1 Parent(s): 7d7bc0e

Update parquet files (step 81 of 121)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/0xrk/gpt2/README.md +0 -12
  2. spaces/1gistliPinn/ChatGPT4/Examples/Dashavatar Tamil Movies.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Devexpress 11.1 [BETTER].md +0 -6
  4. spaces/1line/AutoGPT/scripts/check_requirements.py +0 -32
  5. spaces/1phancelerku/anime-remove-background/Download Ludo Yarsa Game and Experience the Ultimate Ludo Fun.md +0 -133
  6. spaces/1phancelerku/anime-remove-background/FIFA Mobile APK Download Experience the Ultimate Soccer Game on Your Phone.md +0 -128
  7. spaces/4Taps/SadTalker/src/facerender/sync_batchnorm/replicate.py +0 -94
  8. spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/linear_probe.py +0 -66
  9. spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/pl_utils.py +0 -1618
  10. spaces/AIGC-Audio/AudioGPT/audio_to_text/__init__.py +0 -0
  11. spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/text/encoding.py +0 -9
  12. spaces/AIZ2H/04-Gradio-SOTA-Seq2Seq-AutoQA/app.py +0 -51
  13. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py +0 -12
  14. spaces/Abhilashvj/planogram-compliance/utils/loggers/comet/README.md +0 -256
  15. spaces/AfrodreamsAI/afrodreams/INSTALL.md +0 -293
  16. spaces/Aki004/herta-so-vits/vdecoder/nsf_hifigan/nvSTFT.py +0 -134
  17. spaces/AlexN/pull_up/TractionModel.py +0 -59
  18. spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/korean.py +0 -210
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/controlnet.py +0 -822
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py +0 -469
  21. spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py +0 -15
  22. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/grid_head.py +0 -359
  23. spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r50-d8_512x512_40k_voc12aug.py +0 -7
  24. spaces/Andy1621/uniformer_image_segmentation/configs/resnest/README.md +0 -34
  25. spaces/Anew1007/extras/server.py +0 -964
  26. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/fileio/handlers/__init__.py +0 -7
  27. spaces/Arnaudding001/OpenAI_whisperLive/app-shared.py +0 -3
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/models/wheel.py +0 -92
  29. spaces/AyakuraMei/Real-CUGAN/app.py +0 -62
  30. spaces/AzinZ/vitscn/text/mandarin.py +0 -48
  31. spaces/AzinZ/vitscn/text/symbols.py +0 -21
  32. spaces/Bart92/RVC_HF/lib/infer_pack/models.py +0 -1144
  33. spaces/Benson/text-generation/Examples/Carsim 2021 Download.md +0 -95
  34. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/list.py +0 -365
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/mbcharsetprober.py +0 -95
  36. spaces/Boilin/URetinex-Net/README.md +0 -12
  37. spaces/BramVanroy/opus-mt/app.py +0 -156
  38. spaces/CALM/Dashboard/streamlit_observable/frontend/build/index.html +0 -1
  39. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_roi_heads.py +0 -108
  40. spaces/CVPR/LIVE/thrust/thrust/cmake/thrust-config-version.cmake +0 -33
  41. spaces/CVPR/regionclip-demo/detectron2/modeling/text_encoder/hf_model.py +0 -27
  42. spaces/Chitranshu/Dashboard-Uber/Dockerfile +0 -16
  43. spaces/Chris4K/german-sentiment-bert/app.py +0 -9
  44. spaces/Chukwuka/Dog_Breed_ImageWoof/model.py +0 -67
  45. spaces/CoderMayhem/repello/app.py +0 -146
  46. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/engine/__init__.py +0 -1
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_c_i_d_g.py +0 -19
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-ca25ec1d.js +0 -6
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/wrapper-6f348d45-38be7a64.js +0 -8
  50. spaces/DarrenK196/catvsdog/README.md +0 -13
spaces/0xrk/gpt2/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Gpt2
3
- emoji: ⚡
4
- colorFrom: purple
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.43.2
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/1gistliPinn/ChatGPT4/Examples/Dashavatar Tamil Movies.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Dashavatar Tamil Movies</h2><br /><p><b><b>DOWNLOAD</b> &middot;&middot;&middot; <a href="https://imgfil.com/2uy0GI">https://imgfil.com/2uy0GI</a></b></p><br /><br />
2
- <br />
3
- 10 avatars of Dasavatharam ... first of all the name itself is a play on the words singam [means lion in tamil] and narasimha [the avatar being symbolised]. ... In the movie, he shows up to kill the killer fletcher! and is also a ... 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Devexpress 11.1 [BETTER].md DELETED
@@ -1,6 +0,0 @@
1
- <h2>devexpress 11.1</h2><br /><p><b><b>Download File</b> &#128504;&#128504;&#128504; <a href="https://imgfil.com/2uxZeH">https://imgfil.com/2uxZeH</a></b></p><br /><br />
2
- <br />
3
- Learn how to use the MS Excel-style conditional formatting feature to change the appearance of individual cells ... 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1line/AutoGPT/scripts/check_requirements.py DELETED
@@ -1,32 +0,0 @@
1
- import sys
2
-
3
- import pkg_resources
4
-
5
-
6
- def main():
7
- requirements_file = sys.argv[1]
8
- with open(requirements_file, "r") as f:
9
- required_packages = [
10
- line.strip().split("#")[0].strip() for line in f.readlines()
11
- ]
12
-
13
- installed_packages = [package.key for package in pkg_resources.working_set]
14
-
15
- missing_packages = []
16
- for package in required_packages:
17
- if not package: # Skip empty lines
18
- continue
19
- package_name = package.strip().split("==")[0]
20
- if package_name.lower() not in installed_packages:
21
- missing_packages.append(package_name)
22
-
23
- if missing_packages:
24
- print("Missing packages:")
25
- print(", ".join(missing_packages))
26
- sys.exit(1)
27
- else:
28
- print("All packages are installed.")
29
-
30
-
31
- if __name__ == "__main__":
32
- main()
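The deleted check_requirements.py relies on pkg_resources, which newer setuptools releases deprecate. A minimal sketch of the same requirement check built on the standard-library importlib.metadata (assumes Python 3.8+; the argument handling mirrors the deleted script and is illustrative, not part of this commit):

import sys
from importlib.metadata import distributions  # standard library since Python 3.8


def main():
    requirements_file = sys.argv[1]
    with open(requirements_file, "r") as f:
        # Drop comments and blank lines; keep only the package-name part of each requirement.
        required = [
            line.split("#")[0].strip().split("==")[0]
            for line in f
            if line.split("#")[0].strip()
        ]

    installed = {(dist.metadata["Name"] or "").lower() for dist in distributions()}
    missing = [name for name in required if name.lower() not in installed]

    if missing:
        print("Missing packages:")
        print(", ".join(missing))
        sys.exit(1)
    print("All packages are installed.")


if __name__ == "__main__":
    main()

Invocation stays the same, e.g. python check_requirements.py requirements.txt.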
spaces/1phancelerku/anime-remove-background/Download Ludo Yarsa Game and Experience the Ultimate Ludo Fun.md DELETED
@@ -1,133 +0,0 @@
1
-
2
- <h1>How to Download Ludo Yarsa Game and Enjoy Its Benefits and Features</h1>
3
- <p>Ludo is a fun and popular board game that can be played by two to four players. It is a game that has been around for a long time and has been enjoyed by people of all ages. But did you know that you can also play Ludo on your smartphone or tablet? Yes, you can download Ludo Yarsa Game, a beautiful and simple app that lets you play Ludo anytime, anywhere. In this article, we will tell you how to download Ludo Yarsa Game on your device, and what are the benefits and features of playing this game.</p>
4
- <h2>download ludo yarsa game</h2><br /><p><b><b>Download File</b> &#9999; &#9999; &#9999; <a href="https://jinyurl.com/2uNNRw">https://jinyurl.com/2uNNRw</a></b></p><br /><br />
5
- <h2>What is Ludo Yarsa Game?</h2>
6
- <h3>A brief introduction to the game and its origin</h3>
7
- <p>Ludo Yarsa Game is a board game app developed by Yarsa Games, a game studio based in Pokhara, Nepal. They mostly build board games like Ludo and card games like Rummy. Ludo Yarsa Game is one of their most popular games, with over 100 million downloads and a 4.7-star rating on the Google Play Store.</p>
8
- <p>Ludo is a board game that originated from an ancient Indian game called Pachisi. It is also known by different names in different regions, such as Fia in Sweden, Petits Chevaux in France, Non t'arrabbiare in Italy, Ki nevet a végén in Hungary, etc. The name Ludo comes from the Latin word ludus, which means "game".</p>
9
- <h3>The gameplay and rules of the game</h3>
10
- <p>The gameplay of Ludo Yarsa Game is simple and easy to learn. The game starts with four tokens placed in each player's starting box. A dice is rolled in turns by each player during the game. The player's token will be placed on the starting point when a 6 is rolled on the dice. The main goal of the game is to take all four tokens inside the HOME area before the other opponents.</p>
11
- <p>Some basic rules of Ludo Yarsa Game are:</p>
12
- <ul>
13
- <li>A token can start to move only if the dice rolled is a 6.</li>
14
- <li>Each player gets a turn wise chance to roll the dice. And if the player rolls a 6, they will get another chance to roll the dice again.</li>
15
- <li>All the tokens must reach the center of the board to win the game.</li>
16
- <li>Tokens move clockwise according to the number rolled on the dice.</li>
17
- <li>Knocking out another player's token gives you an extra chance to roll the dice again.</li>
18
- </ul>
19
- <h2>How to Download Ludo Yarsa Game on Your Device?</h2>
20
- <h3>The steps to download the game from Google Play Store or App Store</h3>
21
- <p>If you want to download Ludo Yarsa Game on your device, you can follow these simple steps:</p>
22
- <p>For Android devices:</p>
23
- <ol>
24
- <li>Open the Google Play Store app on your device.</li>
25
- <li>Search for "Ludo Yarsa Game" in the search bar.</li>
26
- <li>Select the app from the list of results and tap on "Install".</li>
27
- <li>Wait for the app to download and install on your device.</li>
28
- <li>Once the app is installed, you can open it and start playing.</li>
29
- </ol>
30
- <p>For iOS devices:</p>
31
- <p>How to download ludo yarsa game on android<br />
32
- Ludo yarsa game offline play with friends<br />
33
- Ludo yarsa game apk download latest version<br />
34
- Ludo yarsa game review and rating<br />
35
- Ludo yarsa game rules and tips<br />
36
- Ludo yarsa game multiplayer online mode<br />
37
- Ludo yarsa game for pc windows 10<br />
38
- Ludo yarsa game free download for ios<br />
39
- Ludo yarsa game board size and design<br />
40
- Ludo yarsa game languages and customization<br />
41
- Ludo yarsa game dice roll animation and sound<br />
42
- Ludo yarsa game best strategy and tricks<br />
43
- Ludo yarsa game fun facts and history<br />
44
- Ludo yarsa game features and updates<br />
45
- Ludo yarsa game alternatives and competitors<br />
46
- Ludo yarsa game cheats and hacks<br />
47
- Ludo yarsa game support and feedback<br />
48
- Ludo yarsa game awards and achievements<br />
49
- Ludo yarsa game tournaments and prizes<br />
50
- Ludo yarsa game community and social media<br />
51
- Ludo yarsa game by Yarsa Games developer<br />
52
- Ludo yarsa game vs other ludo games comparison<br />
53
- Ludo yarsa game download link and QR code<br />
54
- Ludo yarsa game installation guide and troubleshooting<br />
55
- Ludo yarsa game system requirements and compatibility<br />
56
- Ludo yarsa game privacy policy and data safety<br />
57
- Ludo yarsa game ads and in-app purchases<br />
58
- Ludo yarsa game speed and performance optimization<br />
59
- Ludo yarsa game bug fixes and improvements<br />
60
- Ludo yarsa game testimonials and user reviews<br />
61
- How to play ludo yarsa game with family and friends<br />
62
- How to win ludo yarsa game every time<br />
63
- How to unlock ludo yarsa game achievements and rewards<br />
64
- How to customize ludo yarsa game tokens and colors<br />
65
- How to change ludo yarsa game language and settings<br />
66
- How to contact ludo yarsa game customer service and support<br />
67
- How to rate and review ludo yarsa game on Google Play Store<br />
68
- How to share ludo yarsa game with others via social media or email<br />
69
- How to delete ludo yarsa game from your device or account<br />
70
- How to update ludo yarsa game to the latest version</p>
71
- <ol>
72
- <li>Open the App Store app on your device.</li>
73
- <li>Search for "Ludo Yarsa Game" in the search bar.</li>
74
- <li>Select the app from the list of results and tap on "Get".</li>
75
- <li>Enter your Apple ID password or use Touch ID or Face ID to confirm the download.</li>
76
- <li>Wait for the app to download and install on your device.</li>
77
- <li>Once the app is installed, you can open it and start playing.</li>
78
- </ol>
79
- <h3>The requirements and compatibility of the game</h3>
80
- <p>Ludo Yarsa Game is a lightweight and fast app that does not take much space or memory on your device. It also works smoothly on most devices and operating systems. However, there are some minimum requirements and compatibility that you need to check before downloading the game. These are:</p>
81
- <table>
82
- <tr><th>Device</th><th>Requirement</th><th>Compatibility</th></tr>
83
- <tr><td>Android</td><td>4.1 and up</td><td>All Android devices that support Google Play Store</td></tr>
84
- <tr><td>iOS</td><td>10.0 or later</td><td>iPhone, iPad, and iPod touch</td></tr>
85
- </table>
86
- <h2>What are the Benefits of Playing Ludo Yarsa Game?</h2>
87
- <h3>The health benefits of playing the game, such as developing brain function, giving pleasure and relieving stress, and lowering blood pressure</h3>
88
- <p>Ludo Yarsa Game is not only a fun and entertaining game, but also a healthy and beneficial one. Playing Ludo Yarsa Game can help you improve your brain function, give you pleasure and relieve stress, and lower your blood pressure. Here are some of the health benefits of playing Ludo Yarsa Game:</p>
89
- <ul>
90
- <li>Ludo Yarsa Game can help you develop your brain function by stimulating your cognitive skills, such as memory, concentration, problem-solving, logic, and strategy. Playing Ludo Yarsa Game can also enhance your creativity and imagination by allowing you to explore different possibilities and outcomes.</li>
91
- <li>Ludo Yarsa Game can give you pleasure and relieve stress by releasing endorphins, dopamine, and serotonin in your brain. These are neurotransmitters that are responsible for making you feel happy, relaxed, and satisfied. Playing Ludo Yarsa Game can also distract you from negative thoughts and emotions, and help you cope with anxiety and depression.</li>
92
- <li>Ludo Yarsa Game can lower your blood pressure by reducing your cortisol levels, which are hormones that are associated with stress and inflammation. Playing Ludo Yarsa Game can also calm your nervous system and regulate your heart rate and breathing. This can help you prevent or manage hypertension, heart disease, stroke, and other cardiovascular problems.</li>
93
- </ul>
94
- <h3>The social benefits of playing the game, such as building communication skills, boosting confidence, and teaching patience</h3>
95
- <p>Ludo Yarsa Game is also a social game that can help you improve your communication skills, boost your confidence, and teach you patience. Playing Ludo Yarsa Game can help you interact with other players, whether they are your friends, family members, or strangers online. Here are some of the social benefits of playing Ludo Yarsa Game:</p>
96
- <ul>
97
- <li>Ludo Yarsa Game can help you build your communication skills by encouraging you to talk to other players, express your opinions, listen to feedback, negotiate rules, cooperate strategies, and resolve conflicts. Playing Ludo Yarsa Game can also improve your verbal and non-verbal communication skills, such as vocabulary, grammar, tone, gesture, facial expression, etc.</li>
98
- <li>Ludo Yarsa Game can boost your confidence by giving you a sense of achievement, competence, and self-esteem. Playing Ludo Yarsa Game can also challenge you to face difficulties, overcome obstacles, learn from mistakes, and celebrate successes. Playing Ludo Yarsa Game can also make you more confident in your abilities and skills.</li>
99
- <li>Ludo Yarsa Game can teach you patience by making you wait for your turn, deal with delays, accept losses, handle frustrations, and respect others. Playing Ludo Yarsa Game can also teach you the value of perseverance, tolerance, and humility.</li>
100
- </ul>
101
- <h2>What are the Features of Ludo Yarsa Game?</h2>
102
- <h3>The features that make the game unique and enjoyable, such as multi-colored dice, real dice roll animation, percentage progress, and game speed customization</h3>
103
- <p>Ludo Yarsa Game is not just a regular board game app. It has many features that make it unique and enjoyable. Some of these features are:</p>
104
- <ul>
105
- <li>Ludo Yarsa Game has multi-colored dice that match the color of your tokens. This makes the game more colorful and attractive. You can also choose from different dice designs, such as classic, wooden, metal, etc.</li>
106
- <li>Ludo Yarsa Game has real dice roll animation that simulates the actual rolling of the dice. This makes the game more realistic and exciting. You can also shake your device to roll the dice, or tap on the screen to stop the dice.</li>
107
- <li>Ludo Yarsa Game has percentage progress that shows you how much you have completed the game. This makes the game more motivating and rewarding. You can also see the percentage progress of other players, and compare your performance with them.</li>
108
- <li>Ludo Yarsa Game has game speed customization that allows you to adjust the speed of the game according to your preference. This makes the game more flexible and convenient. You can choose from slow, normal, fast, or very fast speed modes.</li>
109
- </ul>
110
- <h3>The features that make the game accessible and convenient, such as offline play, pass and play, multiple languages, and multiplayer version</h3>
111
- <p>Ludo Yarsa Game is also a game that is accessible and convenient for everyone. Some of these features are:</p>
112
- <ul>
113
- <li>Ludo Yarsa Game has offline play that enables you to play the game without an internet connection. This makes the game more accessible and reliable. You can play the game anytime, anywhere, even when you are offline.</li>
114
- <li>Ludo Yarsa Game has pass and play that allows you to play the game with your friends or family on one device. This makes the game more social and fun. You can share the device with other players, and pass it around when it is your turn.</li>
115
- <li>Ludo Yarsa Game has multiple languages that support different languages from around the world. This makes the game more inclusive and diverse. You can choose from English, Hindi, Nepali, Spanish, French, German, Arabic, etc.</li>
116
- <li>Ludo Yarsa Game has multiplayer version that lets you play the game with other players online. This makes the game more competitive and challenging. You can join or create a room, invite or join other players, chat with them, and enjoy the game.</li>
117
- </ul>
118
- <h2>Conclusion</h2>
119
- <p>Ludo Yarsa Game is a board game app that you can download on your device and enjoy its benefits and features. It is a game that is fun and popular, simple and easy, healthy and beneficial, unique and enjoyable, accessible and convenient. It is a game that you can play with yourself or with others, online or offline, fast or slow, in different languages and modes. It is a game that you will never get bored of playing.</p>
120
- <p>So what are you waiting for? Download Ludo Yarsa Game now and have fun!</p>
121
- <h2>FAQs</h2>
122
- <h3>Q: How much does Ludo Yarsa Game cost?</h3>
123
- <p>A: Ludo Yarsa Game is free to download and play. However, it contains ads that can be removed by purchasing an ad-free version for $0.99.</p>
124
- <h3>Q: How can I contact Ludo Yarsa Game developers?</h3>
125
- <p>A: You can contact Ludo Yarsa Game developers by emailing them at [email protected] or visiting their website at https://yarsagames.com/.</p>
126
- <h3>Q: How can I rate and review Ludo Yarsa Game?</h3>
127
- <p>A: You can rate and review Ludo Yarsa Game by going to Google Play Store or App Store on your device, finding the app page, and tapping on the stars or writing a comment.</p>
128
- <h3>Q: How can I share Ludo Yarsa Game with my friends?</h3>
129
- <p>A: You can share Ludo Yarsa Game with your friends by using the share button on the app or sending them a link to download the app from Google Play Store or App Store.</p>
130
- <h3>Q: How can I learn more tips and tricks for playing Ludo Yarsa Game?</h3>
131
- <p>A: You can learn more tips and tricks for playing Ludo Yarsa Game by reading the blog posts on their website at https://yarsagames.com/blog/ or watching the videos on their YouTube channel at https://www.youtube.com/channel/UCw9wH3Qs1f0i0XjN7mJ4L9A.</p> 401be4b1e0<br />
132
- <br />
133
- <br />
spaces/1phancelerku/anime-remove-background/FIFA Mobile APK Download Experience the Ultimate Soccer Game on Your Phone.md DELETED
@@ -1,128 +0,0 @@
1
- <br />
2
- <h1>FIFA apkdone: How to Download and Play the Popular Soccer Game on Your Android Device</h1>
3
- <p>If you are a fan of soccer games, you might have heard of <strong>FIFA apkdone</strong>, a modified version of the official FIFA game that allows you to play it on your Android device. In this article, we will tell you everything you need to know about FIFA apkdone, including how to download and install it, how to play it, and what are its pros and cons.</p>
4
- <h2>fifa apkdone</h2><br /><p><b><b>DOWNLOAD</b> &#10003; <a href="https://jinyurl.com/2uNPpl">https://jinyurl.com/2uNPpl</a></b></p><br /><br />
5
- <h2>What is FIFA apkdone?</h2>
6
- <h3>A brief introduction to FIFA apkdone</h3>
7
- <p>FIFA apkdone is a modified version of the original FIFA game developed by EA Sports, one of the most popular and realistic soccer games in the world. FIFA apkdone is not an official app, but a fan-made one that offers some extra features and benefits that are not available in the original game. For example, FIFA apkdone lets you play with unlimited coins and points, unlock all players and teams, customize your stadium and kits, and enjoy high-quality graphics and sound effects.</p>
8
- <h3>The features and benefits of FIFA apkdone</h3>
9
- <p>Some of the features and benefits of FIFA apkdone are:</p>
10
- <ul>
11
- <li>You can play with any team or player you want, including legends like Pele, Maradona, Ronaldo, Messi, etc.</li>
12
- <li>You can access all game modes and options, such as Career Mode, Volta Football, Ultimate Team, Online Seasons, etc.</li>
13
- <li>You can enjoy a smooth and realistic gameplay experience with HyperMotion technology, which adds more animations and movements to the players.</li>
14
- <li>You can customize your game settings according to your preferences, such as difficulty level, camera angle, controls, etc.</li>
15
- <li>You can download and install FIFA apkdone for free without any registration or verification.</li>
16
- </ul>
17
- <h2>How to download and install FIFA apkdone on your Android device</h2>
18
- <h3>The steps to download FIFA apkdone from the official website</h3>
19
- <p>To download FIFA apkdone on your Android device, you need to follow these steps:</p>
20
- <p>fifa mobile apk download<br />
21
- fifa world cup 2022 apk<br />
22
- fifa mobile mod apk unlimited money<br />
23
- fifa mobile hack apk<br />
24
- fifa mobile 23 apk<br />
25
- fifa mobile offline apk<br />
26
- fifa mobile latest version apk<br />
27
- fifa mobile apk obb<br />
28
- fifa mobile apk pure<br />
29
- fifa mobile apk mirror<br />
30
- fifa soccer apk mod<br />
31
- fifa soccer apk download<br />
32
- fifa soccer mod apk unlimited coins<br />
33
- fifa soccer hack apk<br />
34
- fifa soccer 23 apk<br />
35
- fifa soccer offline apk<br />
36
- fifa soccer latest version apk<br />
37
- fifa soccer apk obb<br />
38
- fifa soccer apk pure<br />
39
- fifa soccer apk mirror<br />
40
- fifa football apk mod<br />
41
- fifa football apk download<br />
42
- fifa football mod apk unlimited gems<br />
43
- fifa football hack apk<br />
44
- fifa football 23 apk<br />
45
- fifa football offline apk<br />
46
- fifa football latest version apk<br />
47
- fifa football apk obb<br />
48
- fifa football apk pure<br />
49
- fifa football apk mirror<br />
50
- fifa 23 android apk download<br />
51
- fifa 23 android mod apk<br />
52
- fifa 23 android hack apk<br />
53
- fifa 23 android offline apk<br />
54
- fifa 23 android latest version apk<br />
55
- fifa 23 android apk obb<br />
56
- fifa 23 android apk pure<br />
57
- fifa 23 android apk mirror<br />
58
- download game fifa mobile mod apk terbaru <br />
59
- download game fifa mobile hack apk <br />
60
- download game fifa mobile offline mod apk <br />
61
- download game fifa mobile latest version mod apk <br />
62
- download game fifa mobile full unlocked mod apk <br />
63
- download game fifa mobile unlimited coins and gems mod apk <br />
64
- download game fifa mobile mega mod menu</p>
65
- <ol>
66
- <li>Go to the official website of FIFA apkdone at <a href="https://apkdone.com/fifa-soccer/">https://apkdone.com/fifa-soccer/</a>.</li>
67
- <li>Scroll down and click on the green button that says "Download APK (94.8 MB)".</li>
68
- <li>Wait for the download to finish and then locate the file in your device's storage.</li>
69
- </ol>
70
- <h3>The steps to install FIFA apkdone on your Android device</h3>
71
- <p>To install FIFA apkdone on your Android device, you need to follow these steps:</p>
72
- <ol>
73
- <li>Before installing the file, make sure you have enabled the option to install apps from unknown sources in your device's settings.</li>
74
- <li>Tap on the downloaded file and follow the instructions on the screen to install it.</li>
75
- <li>Once the installation is complete, you can launch the game and enjoy playing FIFA apkdone on your Android device.</li>
76
- </ol>
77
- <h2>How to play FIFA apkdone on your Android device</h2>
78
- <h3>The game modes and options available in FIFA apkdone</h3>
79
- <p>FIFA apkdone offers a variety of game modes and options for you to choose from, depending on your mood and preference. Some of the game modes and options are:</p>
80
- <ul>
81
- <li><strong>Career Mode</strong>: In this mode, you can create your own player or manager and lead your team to glory. You can also transfer players, negotiate contracts, scout talents, and manage your club's finances.</li>
82
- <li><strong>Volta Football</strong>: In this mode, you can experience the street soccer culture and play in different locations around the world. You can also customize your avatar, team, and style.</li>
83
- <li><strong>Ultimate Team</strong>: In this mode, you can build your dream team from scratch and compete with other players online. You can also earn coins and points, open packs, and upgrade your players.</li>
84
- <li><strong>Online Seasons</strong>: In this mode, you can play against other players online and climb the ranks. You can also earn rewards and trophies, and challenge your friends.</li>
85
- </ul>
86
- <h3>The tips and tricks to improve your skills and performance in FIFA apkdone</h3>
87
- <p>To improve your skills and performance in FIFA apkdone, you need to practice a lot and learn some tips and tricks. Some of the tips and tricks are:</p>
88
- <ul>
89
- <li><strong>Use the right controls</strong>: FIFA apkdone offers different types of controls for you to choose from, such as classic, casual, or gesture. You need to find the one that suits you best and master it.</li>
90
- <li><strong>Use the right tactics</strong>: FIFA apkdone allows you to adjust your tactics according to your strategy and opponent. You need to use the right tactics for each situation, such as attacking, defending, or counter-attacking.</li>
91
- <li><strong>Use the right players</strong>: FIFA apkdone gives you access to all players and teams in the game. You need to use the right players for each position, role, and formation. You also need to consider their attributes, skills, and chemistry.</li>
92
- <li><strong>Use the right skills</strong>: FIFA apkdone enables you to perform various skills with your players, such as dribbling, passing, shooting, tackling, etc. You need to use the right skills for each scenario, such as creating space, breaking through, scoring goals, etc.</li>
93
- </ul>
94
- <h2>The pros and cons of FIFA apkdone</h2>
95
- <h3>The advantages of FIFA apkdone over other soccer games</h3>
96
- <p>FIFA apkdone has many advantages over other soccer games available on the market. Some of the advantages are:</p>
97
- <ul>
98
- <li><strong>It is free and easy to download and install</strong>: Unlike the official FIFA game, which requires a lot of space and money to download and install, FIFA apkdone is free and easy to download and install on your Android device.</li>
99
- <li><strong>It offers more features and benefits than the official FIFA game</strong>: Unlike the official FIFA game, which has some limitations and restrictions on its features and benefits, FIFA apkdone offers more features and benefits than the official game, such as unlimited coins and points, all players and teams unlocked, etc.</li>
100
- <li><strong>It has a high-quality graphics and sound effects</strong>: Unlike some other soccer games, which have low-quality graphics and sound effects, FIFA apkdone has a high-quality graphics and sound effects that make the game more realistic and immersive.</li>
101
- <li><strong>It has a large fan base and community</strong>: Unlike some other soccer games, which have a small fan base and community, FIFA apkdone has a large fan base and community that supports the game and provides feedbacks, suggestions, reviews, etc.</li>
102
- </ul>
103
- <h3>The disadvantages or limitations of FIFA apkdone</h3>
104
- <p>FIFA apkdone also has some disadvantages or limitations that you need to be aware of before playing it. Some of the disadvantages or limitations are:</p>
105
- <ul>
106
- <li><strong>It is not an official app but a modified one</strong>: Since FIFA apkdone is not an official app but a modified one, it may not be compatible with some devices or updates. It may also have some bugs or errors that affect its performance or functionality.</li>
107
- <li><strong>It may violate some terms and conditions of EA Sports</strong>: Since FIFA apkdone is not an official app but a modified one, it may violate some terms and conditions of EA Sports, the developer of the original FIFA game. It may also expose you to some legal risks or penalties if you use it without permission or authorization.</li>
108
- <li><strong>It may contain some ads or malware that may harm your device or data</strong>: Since FIFA apkdone is not an official app but a modified one, it may contain some ads or malware that may harm your device or data. You need to be careful and cautious when downloading and installing it.</li>
109
- <li><strong>It may not be as updated or supported as the official FIFA game</strong>: Since FIFA apkdone is not an official app but a modified one, it may not be as updated or supported as the official FIFA game. You may miss out on some new features, content, or events that are added to the official game.</li>
110
- </ul>
111
- <h2>Conclusion</h2>
112
- <h3>A summary of the main points of the article</h3>
113
- <p>In conclusion, FIFA apkdone is a modified version of the official FIFA game that allows you to play it on your Android device. It offers some extra features and benefits that are not available in the original game, such as unlimited coins and points, all players and teams unlocked, etc. It also has a high-quality graphics and sound effects, and a large fan base and community. However, it also has some disadvantages or limitations that you need to be aware of before playing it, such as being not an official app but a modified one, violating some terms and conditions of EA Sports, containing some ads or malware, and not being as updated or supported as the official game. Therefore, you need to weigh the pros and cons of FIFA apkdone before deciding whether to download and install it on your Android device.</p>
114
- <h3>A call to action for the readers to try out FIFA apkdone</h3>
115
- <p>If you are interested in trying out FIFA apkdone on your Android device, you can follow the steps we have provided in this article to download and install it. You can also visit the official website of FIFA apkdone at <a href="">https://apkdone.com/fifa-soccer/</a> for more information and updates. However, you need to be careful and cautious when using FIFA apkdone, as it is not an official app but a modified one. You also need to respect the rights and property of EA Sports, the developer of the original FIFA game. We hope you enjoy playing FIFA apkdone on your Android device and have fun with your favorite soccer game.</p>
116
- <h2>FAQs</h2>
117
- <h3>What are the requirements to run FIFA apkdone on your Android device?</h3>
118
- <p>To run FIFA apkdone on your Android device, you need to have at least Android 5.0 or higher, 2 GB of RAM, 4 GB of free storage space, and a stable internet connection.</p>
119
- <h3>Is FIFA apkdone safe and legal to use?</h3>
120
- <p>FIFA apkdone is not an official app but a modified one that may violate some terms and conditions of EA Sports, the developer of the original FIFA game. It may also contain some ads or malware that may harm your device or data. Therefore, it is not completely safe or legal to use. You need to be careful and cautious when using FIFA apkdone, and use it at your own risk.</p>
121
- <h3>How can I update FIFA apkdone to the latest version?</h3>
122
- <p>To update FIFA apkdone to the latest version, you need to visit the official website of FIFA apkdone at <a href="">https://apkdone.com/fifa-soccer/</a> and download the latest version of the file. Then, you need to uninstall the previous version of FIFA apkdone from your device and install the new version following the same steps we have provided in this article.</p>
123
- <h3>How can I contact the developers or support team of FIFA apkdone?</h3>
124
- <p>To contact the developers or support team of FIFA apkdone, you can visit their Facebook page at <a href="">https://www.facebook.com/apkdonedotcom/</a> or their Twitter account at <a href="">https://twitter.com/apkdonedotcom</a>. You can also send them an email at <a href="">[email protected]</a>.</p>
125
- <h3>How can I access more features and content in FIFA apkdone?</h3>
126
- <p>To access more features and content in FIFA apkdone, you need to earn more coins and points by playing the game modes and options available in the game. You can also use some cheats or hacks that are provided by some websites or apps online. However, you need to be careful and cautious when using these cheats or hacks, as they may harm your device or data, or get you banned from the game.</p> 401be4b1e0<br />
127
- <br />
128
- <br />
spaces/4Taps/SadTalker/src/facerender/sync_batchnorm/replicate.py DELETED
@@ -1,94 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # File : replicate.py
3
- # Author : Jiayuan Mao
4
- # Email : [email protected]
5
- # Date : 27/01/2018
6
- #
7
- # This file is part of Synchronized-BatchNorm-PyTorch.
8
- # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
9
- # Distributed under MIT License.
10
-
11
- import functools
12
-
13
- from torch.nn.parallel.data_parallel import DataParallel
14
-
15
- __all__ = [
16
- 'CallbackContext',
17
- 'execute_replication_callbacks',
18
- 'DataParallelWithCallback',
19
- 'patch_replication_callback'
20
- ]
21
-
22
-
23
- class CallbackContext(object):
24
- pass
25
-
26
-
27
- def execute_replication_callbacks(modules):
28
- """
29
- Execute an replication callback `__data_parallel_replicate__` on each module created by original replication.
30
-
31
- The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
32
-
33
- Note that, as all modules are isomorphism, we assign each sub-module with a context
34
- (shared among multiple copies of this module on different devices).
35
- Through this context, different copies can share some information.
36
-
37
- We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback
38
- of any slave copies.
39
- """
40
- master_copy = modules[0]
41
- nr_modules = len(list(master_copy.modules()))
42
- ctxs = [CallbackContext() for _ in range(nr_modules)]
43
-
44
- for i, module in enumerate(modules):
45
- for j, m in enumerate(module.modules()):
46
- if hasattr(m, '__data_parallel_replicate__'):
47
- m.__data_parallel_replicate__(ctxs[j], i)
48
-
49
-
50
- class DataParallelWithCallback(DataParallel):
51
- """
52
- Data Parallel with a replication callback.
53
-
54
- An replication callback `__data_parallel_replicate__` of each module will be invoked after being created by
55
- original `replicate` function.
56
- The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
57
-
58
- Examples:
59
- > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
60
- > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
61
- # sync_bn.__data_parallel_replicate__ will be invoked.
62
- """
63
-
64
- def replicate(self, module, device_ids):
65
- modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
66
- execute_replication_callbacks(modules)
67
- return modules
68
-
69
-
70
- def patch_replication_callback(data_parallel):
71
- """
72
- Monkey-patch an existing `DataParallel` object. Add the replication callback.
73
- Useful when you have customized `DataParallel` implementation.
74
-
75
- Examples:
76
- > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
77
- > sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
78
- > patch_replication_callback(sync_bn)
79
- # this is equivalent to
80
- > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
81
- > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
82
- """
83
-
84
- assert isinstance(data_parallel, DataParallel)
85
-
86
- old_replicate = data_parallel.replicate
87
-
88
- @functools.wraps(old_replicate)
89
- def new_replicate(module, device_ids):
90
- modules = old_replicate(module, device_ids)
91
- execute_replication_callbacks(modules)
92
- return modules
93
-
94
- data_parallel.replicate = new_replicate
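The docstrings in replicate.py describe the __data_parallel_replicate__(ctx, copy_id) protocol but give no end-to-end module example. A minimal sketch of a module implementing the callback (ToyModule, its layer sizes, and the commented-out usage are illustrative assumptions, not part of the original code):

import torch
from torch import nn


class ToyModule(nn.Module):
    """Toy module that records which replica it became after replication."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(8, 4)
        self.copy_id = None  # filled in by the replication callback

    def __data_parallel_replicate__(self, ctx, copy_id):
        # Invoked once per replica after DataParallelWithCallback.replicate();
        # ctx is the CallbackContext shared by every copy of this sub-module,
        # and the master copy (copy_id == 0) is called before any other copy.
        self.copy_id = copy_id
        if copy_id == 0:
            ctx.master = self

    def forward(self, x):
        return self.linear(x)


# Usage sketch (needs at least two visible CUDA devices):
# model = DataParallelWithCallback(ToyModule().cuda(), device_ids=[0, 1])
# out = model(torch.randn(16, 8).cuda())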
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/linear_probe.py DELETED
@@ -1,66 +0,0 @@
1
- import numpy as np
2
- import torch.nn.functional as F
3
- from torch import nn
4
- from .model import MLPLayers
5
-
6
-
7
- class LinearProbe(nn.Module):
8
- def __init__(self, model, mlp, freeze, in_ch, out_ch, act=None):
9
- """
10
- Args:
11
- model: nn.Module
12
- mlp: bool, if True, then use the MLP layer as the linear probe module
13
- freeze: bool, if True, then freeze all the CLAP model's layers when training the linear probe
14
- in_ch: int, the output channel from CLAP model
15
- out_ch: int, the output channel from linear probe (class_num)
16
- act: torch.nn.functional, the activation function before the loss function
17
- """
18
- super().__init__()
19
- in_ch = 512
20
- self.clap_model = model
21
- self.clap_model.text_branch = None # to save memory
22
- self.freeze = freeze
23
- if mlp:
24
- self.lp_layer = MLPLayers(units=[in_ch, in_ch * 2, out_ch])
25
- else:
26
- self.lp_layer = nn.Linear(in_ch, out_ch)
27
-
28
- if self.freeze:
29
- for param in self.clap_model.parameters():
30
- param.requires_grad = False
31
-
32
- if act == "None":
33
- self.act = None
34
- elif act == "relu":
35
- self.act = nn.ReLU()
36
- elif act == "elu":
37
- self.act = nn.ELU()
38
- elif act == "prelu":
39
- self.act = nn.PReLU(num_parameters=in_ch)
40
- elif act == "softmax":
41
- self.act = nn.Softmax(dim=-1)
42
- elif act == "sigmoid":
43
- self.act = nn.Sigmoid()
44
-
45
- def forward(self, x, mix_lambda=None, device=None):
46
- """
47
- Args:
48
- x: waveform, torch.tensor [batch, t_samples] / batch of mel_spec and longer list
49
- mix_lambda: torch.tensor [batch], the mixup lambda
50
- Returns:
51
- class_prob: torch.tensor [batch, class_num]
52
-
53
- """
54
- # batchnorm: cancel gradient
55
- if self.freeze:
56
- self.clap_model.eval()
57
-
58
- x = self.clap_model.audio_projection(
59
- self.clap_model.audio_branch(x, mixup_lambda=mix_lambda, device=device)[
60
- "embedding"
61
- ]
62
- )
63
- out = self.lp_layer(x)
64
- if self.act is not None:
65
- out = self.act(out)
66
- return out
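LinearProbe touches only a few attributes of the wrapped CLAP model (audio_branch, audio_projection, text_branch), so its wiring can be sketched against a stand-in. FakeCLAP below is invented purely for illustration, and the out_ch/act values are example settings, not ones used in this Space:

import torch
from torch import nn

from audioldm.clap.open_clip.linear_probe import LinearProbe  # module path as laid out in this Space


class FakeCLAP(nn.Module):
    """Stand-in exposing only the attributes LinearProbe uses; illustrative only."""

    def __init__(self, embed_dim=512):
        super().__init__()
        self.text_branch = nn.Identity()  # LinearProbe replaces this with None to save memory
        self.audio_projection = nn.Linear(embed_dim, embed_dim)
        self._encoder = nn.Linear(1000, embed_dim)

    def audio_branch(self, x, mixup_lambda=None, device=None):
        # The real CLAP audio branch returns a dict carrying an "embedding" entry.
        return {"embedding": self._encoder(x[:, :1000])}


probe = LinearProbe(FakeCLAP(), mlp=True, freeze=True, in_ch=512, out_ch=50, act="sigmoid")
class_prob = probe(torch.randn(4, 48000))  # waveform batch [batch, t_samples] -> [4, 50]

Note that __init__ hard-codes in_ch = 512 regardless of the value passed in, so the probe head always expects 512-dimensional CLAP embeddings.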
spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/pl_utils.py DELETED
@@ -1,1618 +0,0 @@
1
- import matplotlib
2
- from torch.nn import DataParallel
3
- from torch.nn.parallel import DistributedDataParallel
4
-
5
- matplotlib.use('Agg')
6
- import glob
7
- import itertools
8
- import subprocess
9
- import threading
10
- import traceback
11
-
12
- from pytorch_lightning.callbacks import GradientAccumulationScheduler
13
- from pytorch_lightning.callbacks import ModelCheckpoint
14
-
15
- from functools import wraps
16
- from torch.cuda._utils import _get_device_index
17
- import numpy as np
18
- import torch.optim
19
- import torch.utils.data
20
- import copy
21
- import logging
22
- import os
23
- import re
24
- import sys
25
- import torch
26
- import torch.distributed as dist
27
- import torch.multiprocessing as mp
28
- import tqdm
29
- from torch.optim.optimizer import Optimizer
30
-
31
-
32
- def get_a_var(obj): # pragma: no cover
33
- if isinstance(obj, torch.Tensor):
34
- return obj
35
-
36
- if isinstance(obj, list) or isinstance(obj, tuple):
37
- for result in map(get_a_var, obj):
38
- if isinstance(result, torch.Tensor):
39
- return result
40
- if isinstance(obj, dict):
41
- for result in map(get_a_var, obj.items()):
42
- if isinstance(result, torch.Tensor):
43
- return result
44
- return None
45
-
46
-
47
- def data_loader(fn):
48
- """
49
- Decorator to make any fx with this use the lazy property
50
- :param fn:
51
- :return:
52
- """
53
-
54
- wraps(fn)
55
- attr_name = '_lazy_' + fn.__name__
56
-
57
- def _get_data_loader(self):
58
- try:
59
- value = getattr(self, attr_name)
60
- except AttributeError:
61
- try:
62
- value = fn(self) # Lazy evaluation, done only once.
63
- if (
64
- value is not None and
65
- not isinstance(value, list) and
66
- fn.__name__ in ['test_dataloader', 'val_dataloader']
67
- ):
68
- value = [value]
69
- except AttributeError as e:
70
- # Guard against AttributeError suppression. (Issue #142)
71
- traceback.print_exc()
72
- error = f'{fn.__name__}: An AttributeError was encountered: ' + str(e)
73
- raise RuntimeError(error) from e
74
- setattr(self, attr_name, value) # Memoize evaluation.
75
- return value
76
-
77
- return _get_data_loader
78
-
79
-
80
- def parallel_apply(modules, inputs, kwargs_tup=None, devices=None): # pragma: no cover
81
- r"""Applies each `module` in :attr:`modules` in parallel on arguments
82
- contained in :attr:`inputs` (positional) and :attr:`kwargs_tup` (keyword)
83
- on each of :attr:`devices`.
84
-
85
- Args:
86
- modules (Module): modules to be parallelized
87
- inputs (tensor): inputs to the modules
88
- devices (list of int or torch.device): CUDA devices
89
-
90
- :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and
91
- :attr:`devices` (if given) should all have same length. Moreover, each
92
- element of :attr:`inputs` can either be a single object as the only argument
93
- to a module, or a collection of positional arguments.
94
- """
95
- assert len(modules) == len(inputs)
96
- if kwargs_tup is not None:
97
- assert len(modules) == len(kwargs_tup)
98
- else:
99
- kwargs_tup = ({},) * len(modules)
100
- if devices is not None:
101
- assert len(modules) == len(devices)
102
- else:
103
- devices = [None] * len(modules)
104
- devices = list(map(lambda x: _get_device_index(x, True), devices))
105
- lock = threading.Lock()
106
- results = {}
107
- grad_enabled = torch.is_grad_enabled()
108
-
109
- def _worker(i, module, input, kwargs, device=None):
110
- torch.set_grad_enabled(grad_enabled)
111
- if device is None:
112
- device = get_a_var(input).get_device()
113
- try:
114
- with torch.cuda.device(device):
115
- # this also avoids accidental slicing of `input` if it is a Tensor
116
- if not isinstance(input, (list, tuple)):
117
- input = (input,)
118
-
119
- # ---------------
120
- # CHANGE
121
- if module.training:
122
- output = module.training_step(*input, **kwargs)
123
-
124
- elif module.testing:
125
- output = module.test_step(*input, **kwargs)
126
-
127
- else:
128
- output = module.validation_step(*input, **kwargs)
129
- # ---------------
130
-
131
- with lock:
132
- results[i] = output
133
- except Exception as e:
134
- with lock:
135
- results[i] = e
136
-
137
- # make sure each module knows what training state it's in...
138
- # fixes weird bug where copies are out of sync
139
- root_m = modules[0]
140
- for m in modules[1:]:
141
- m.training = root_m.training
142
- m.testing = root_m.testing
143
-
144
- if len(modules) > 1:
145
- threads = [threading.Thread(target=_worker,
146
- args=(i, module, input, kwargs, device))
147
- for i, (module, input, kwargs, device) in
148
- enumerate(zip(modules, inputs, kwargs_tup, devices))]
149
-
150
- for thread in threads:
151
- thread.start()
152
- for thread in threads:
153
- thread.join()
154
- else:
155
- _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0])
156
-
157
- outputs = []
158
- for i in range(len(inputs)):
159
- output = results[i]
160
- if isinstance(output, Exception):
161
- raise output
162
- outputs.append(output)
163
- return outputs
164
-
165
-
166
- def _find_tensors(obj): # pragma: no cover
167
- r"""
168
- Recursively find all tensors contained in the specified object.
169
- """
170
- if isinstance(obj, torch.Tensor):
171
- return [obj]
172
- if isinstance(obj, (list, tuple)):
173
- return itertools.chain(*map(_find_tensors, obj))
174
- if isinstance(obj, dict):
175
- return itertools.chain(*map(_find_tensors, obj.values()))
176
- return []
177
-
178
-
179
- class DDP(DistributedDataParallel):
180
- """
181
- Override the forward call in lightning so it goes to training and validation step respectively
182
- """
183
-
184
- def parallel_apply(self, replicas, inputs, kwargs):
185
- return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
186
-
187
- def forward(self, *inputs, **kwargs): # pragma: no cover
188
- self._sync_params()
189
- if self.device_ids:
190
- inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
191
- if len(self.device_ids) == 1:
192
- # --------------
193
- # LIGHTNING MOD
194
- # --------------
195
- # normal
196
- # output = self.module(*inputs[0], **kwargs[0])
197
- # lightning
198
- if self.module.training:
199
- output = self.module.training_step(*inputs[0], **kwargs[0])
200
- elif self.module.testing:
201
- output = self.module.test_step(*inputs[0], **kwargs[0])
202
- else:
203
- output = self.module.validation_step(*inputs[0], **kwargs[0])
204
- else:
205
- outputs = self.parallel_apply(self._module_copies[:len(inputs)], inputs, kwargs)
206
- output = self.gather(outputs, self.output_device)
207
- else:
208
- # normal
209
- output = self.module(*inputs, **kwargs)
210
-
211
- if torch.is_grad_enabled():
212
- # We'll return the output object verbatim since it is a freeform
213
- # object. We need to find any tensors in this object, though,
214
- # because we need to figure out which parameters were used during
215
- # this forward pass, to ensure we short circuit reduction for any
216
- # unused parameters. Only if `find_unused_parameters` is set.
217
- if self.find_unused_parameters:
218
- self.reducer.prepare_for_backward(list(_find_tensors(output)))
219
- else:
220
- self.reducer.prepare_for_backward([])
221
- return output
222
-
223
-
224
- class DP(DataParallel):
225
- """
226
- Override the forward call in lightning so it goes to training and validation step respectively
227
- """
228
-
229
- def forward(self, *inputs, **kwargs):
230
- if not self.device_ids:
231
- return self.module(*inputs, **kwargs)
232
-
233
- for t in itertools.chain(self.module.parameters(), self.module.buffers()):
234
- if t.device != self.src_device_obj:
235
- raise RuntimeError("module must have its parameters and buffers "
236
- "on device {} (device_ids[0]) but found one of "
237
- "them on device: {}".format(self.src_device_obj, t.device))
238
-
239
- inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
240
- if len(self.device_ids) == 1:
241
- # lightning
242
- if self.module.training:
243
- return self.module.training_step(*inputs[0], **kwargs[0])
244
- elif self.module.testing:
245
- return self.module.test_step(*inputs[0], **kwargs[0])
246
- else:
247
- return self.module.validation_step(*inputs[0], **kwargs[0])
248
-
249
- replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
250
- outputs = self.parallel_apply(replicas, inputs, kwargs)
251
- return self.gather(outputs, self.output_device)
252
-
253
- def parallel_apply(self, replicas, inputs, kwargs):
254
- return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
255
-
256
-
257
- class GradientAccumulationScheduler:
258
- def __init__(self, scheduling: dict):
259
- if scheduling == {}: # empty dict error
260
- raise TypeError("Empty dict cannot be interpreted correct")
261
-
262
- for key in scheduling.keys():
263
- if not isinstance(key, int) or not isinstance(scheduling[key], int):
264
- raise TypeError("All epoches and accumulation factor must be integers")
265
-
266
- minimal_epoch = min(scheduling.keys())
267
- if minimal_epoch < 1:
268
- msg = f"Epochs indexing from 1, epoch {minimal_epoch} cannot be interpreted correct"
269
- raise IndexError(msg)
270
- elif minimal_epoch != 1: # if user didnt define first epoch accumulation factor
271
- scheduling.update({1: 1})
272
-
273
- self.scheduling = scheduling
274
- self.epochs = sorted(scheduling.keys())
275
-
276
- def on_epoch_begin(self, epoch, trainer):
277
- epoch += 1 # indexing epochs from 1
278
- for i in reversed(range(len(self.epochs))):
279
- if epoch >= self.epochs[i]:
280
- trainer.accumulate_grad_batches = self.scheduling.get(self.epochs[i])
281
- break
282
-
283
-
284
- class LatestModelCheckpoint(ModelCheckpoint):
285
- def __init__(self, filepath, monitor='val_loss', verbose=0, num_ckpt_keep=5,
286
- save_weights_only=False, mode='auto', period=1, prefix='model', save_best=True):
287
- super(ModelCheckpoint, self).__init__()
288
- self.monitor = monitor
289
- self.verbose = verbose
290
- self.filepath = filepath
291
- os.makedirs(filepath, exist_ok=True)
292
- self.num_ckpt_keep = num_ckpt_keep
293
- self.save_best = save_best
294
- self.save_weights_only = save_weights_only
295
- self.period = period
296
- self.epochs_since_last_check = 0
297
- self.prefix = prefix
298
- self.best_k_models = {}
299
- # {filename: monitor}
300
- self.kth_best_model = ''
301
- self.save_top_k = 1
302
- self.task = None
303
- if mode == 'min':
304
- self.monitor_op = np.less
305
- self.best = np.Inf
306
- self.mode = 'min'
307
- elif mode == 'max':
308
- self.monitor_op = np.greater
309
- self.best = -np.Inf
310
- self.mode = 'max'
311
- else:
312
- if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
313
- self.monitor_op = np.greater
314
- self.best = -np.Inf
315
- self.mode = 'max'
316
- else:
317
- self.monitor_op = np.less
318
- self.best = np.Inf
319
- self.mode = 'min'
320
- if os.path.exists(f'{self.filepath}/best_valid.npy'):
321
- self.best = np.load(f'{self.filepath}/best_valid.npy')[0]
322
-
323
- def get_all_ckpts(self):
324
- return sorted(glob.glob(f'{self.filepath}/{self.prefix}_ckpt_steps_*.ckpt'),
325
- key=lambda x: -int(re.findall('.*steps\_(\d+)\.ckpt', x)[0]))
326
-
327
- def on_epoch_end(self, epoch, logs=None):
328
- logs = logs or {}
329
- self.epochs_since_last_check += 1
330
- best_filepath = f'{self.filepath}/{self.prefix}_ckpt_best.pt'
331
- if self.epochs_since_last_check >= self.period:
332
- self.epochs_since_last_check = 0
333
- filepath = f'{self.filepath}/{self.prefix}_ckpt_steps_{self.task.global_step}.ckpt'
334
- if self.verbose > 0:
335
- logging.info(f'Epoch {epoch:05d}@{self.task.global_step}: saving model to {filepath}')
336
- self._save_model(filepath)
337
- for old_ckpt in self.get_all_ckpts()[self.num_ckpt_keep:]:
338
- subprocess.check_call(f'rm -rf "{old_ckpt}"', shell=True)
339
- if self.verbose > 0:
340
- logging.info(f'Delete ckpt: {os.path.basename(old_ckpt)}')
341
- current = logs.get(self.monitor)
342
- if current is not None and self.save_best:
343
- if self.monitor_op(current, self.best):
344
- self.best = current
345
- if self.verbose > 0:
346
- logging.info(
347
- f'Epoch {epoch:05d}@{self.task.global_step}: {self.monitor} reached'
348
- f' {current:0.5f} (best {self.best:0.5f}), saving model to'
349
- f' {best_filepath} as top 1')
350
- self._save_model(best_filepath)
351
- np.save(f'{self.filepath}/best_valid.npy', [self.best])
352
-
353
-
354
- class BaseTrainer:
355
- def __init__(
356
- self,
357
- logger=True,
358
- checkpoint_callback=True,
359
- default_save_path=None,
360
- gradient_clip_val=0,
361
- process_position=0,
362
- gpus=-1,
363
- log_gpu_memory=None,
364
- show_progress_bar=True,
365
- track_grad_norm=-1,
366
- check_val_every_n_epoch=1,
367
- accumulate_grad_batches=1,
368
- max_updates=1000,
369
- min_epochs=1,
370
- val_check_interval=1.0,
371
- log_save_interval=100,
372
- row_log_interval=10,
373
- print_nan_grads=False,
374
- weights_summary='full',
375
- num_sanity_val_steps=5,
376
- resume_from_checkpoint=None,
377
- ):
378
- self.log_gpu_memory = log_gpu_memory
379
- self.gradient_clip_val = gradient_clip_val
380
- self.check_val_every_n_epoch = check_val_every_n_epoch
381
- self.track_grad_norm = track_grad_norm
382
- self.on_gpu = True if (gpus and torch.cuda.is_available()) else False
383
- self.process_position = process_position
384
- self.weights_summary = weights_summary
385
- self.max_updates = max_updates
386
- self.min_epochs = min_epochs
387
- self.num_sanity_val_steps = num_sanity_val_steps
388
- self.print_nan_grads = print_nan_grads
389
- self.resume_from_checkpoint = resume_from_checkpoint
390
- self.default_save_path = default_save_path
391
-
392
- # training bookkeeping
393
- self.total_batch_idx = 0
394
- self.running_loss = []
395
- self.avg_loss = 0
396
- self.batch_idx = 0
397
- self.tqdm_metrics = {}
398
- self.callback_metrics = {}
399
- self.num_val_batches = 0
400
- self.num_training_batches = 0
401
- self.num_test_batches = 0
402
- self.get_train_dataloader = None
403
- self.get_test_dataloaders = None
404
- self.get_val_dataloaders = None
405
- self.is_iterable_train_dataloader = False
406
-
407
- # training state
408
- self.model = None
409
- self.testing = False
410
- self.disable_validation = False
411
- self.lr_schedulers = []
412
- self.optimizers = None
413
- self.global_step = 0
414
- self.current_epoch = 0
415
- self.total_batches = 0
416
-
417
- # configure checkpoint callback
418
- self.checkpoint_callback = checkpoint_callback
419
- self.checkpoint_callback.save_function = self.save_checkpoint
420
- self.weights_save_path = self.checkpoint_callback.filepath
421
-
422
- # accumulated grads
423
- self.configure_accumulated_gradients(accumulate_grad_batches)
424
-
425
- # allow int, string and gpu list
426
- self.data_parallel_device_ids = [
427
- int(x) for x in os.environ.get("CUDA_VISIBLE_DEVICES", "").split(",") if x != '']
428
- if len(self.data_parallel_device_ids) == 0:
429
- self.root_gpu = None
430
- self.on_gpu = False
431
- else:
432
- self.root_gpu = self.data_parallel_device_ids[0]
433
- self.on_gpu = True
434
-
435
- # distributed backend choice
436
- self.use_ddp = False
437
- self.use_dp = False
438
- self.single_gpu = False
439
- self.distributed_backend = 'ddp' if self.num_gpus > 0 else 'dp'
440
- self.set_distributed_mode(self.distributed_backend)
441
-
442
- self.proc_rank = 0
443
- self.world_size = 1
444
- self.node_rank = 0
445
-
446
- # can't init progress bar here because starting a new process
447
- # means the progress_bar won't survive pickling
448
- self.show_progress_bar = show_progress_bar
449
-
450
- # logging
451
- self.log_save_interval = log_save_interval
452
- self.val_check_interval = val_check_interval
453
- self.logger = logger
454
- self.logger.rank = 0
455
- self.row_log_interval = row_log_interval
456
-
457
- @property
458
- def num_gpus(self):
459
- gpus = self.data_parallel_device_ids
460
- if gpus is None:
461
- return 0
462
- else:
463
- return len(gpus)
464
-
465
- @property
466
- def data_parallel(self):
467
- return self.use_dp or self.use_ddp
468
-
469
- def get_model(self):
470
- is_dp_module = isinstance(self.model, (DDP, DP))
471
- model = self.model.module if is_dp_module else self.model
472
- return model
473
-
474
- # -----------------------------
475
- # MODEL TRAINING
476
- # -----------------------------
477
- def fit(self, model):
478
- if self.use_ddp:
479
- mp.spawn(self.ddp_train, nprocs=self.num_gpus, args=(model,))
480
- else:
481
- model.model = model.build_model()
482
- if not self.testing:
483
- self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
484
- if self.use_dp:
485
- model.cuda(self.root_gpu)
486
- model = DP(model, device_ids=self.data_parallel_device_ids)
487
- elif self.single_gpu:
488
- model.cuda(self.root_gpu)
489
- self.run_pretrain_routine(model)
490
- return 1
491
-
492
- def init_optimizers(self, optimizers):
493
-
494
- # single optimizer
495
- if isinstance(optimizers, Optimizer):
496
- return [optimizers], []
497
-
498
- # two lists
499
- elif len(optimizers) == 2 and isinstance(optimizers[0], list):
500
- optimizers, lr_schedulers = optimizers
501
- return optimizers, lr_schedulers
502
-
503
- # single list or tuple
504
- elif isinstance(optimizers, list) or isinstance(optimizers, tuple):
505
- return optimizers, []
506
-
507
- def run_pretrain_routine(self, model):
508
- """Sanity check a few things before starting actual training.
509
-
510
- :param model:
511
- """
512
- ref_model = model
513
- if self.data_parallel:
514
- ref_model = model.module
515
-
516
- # give model convenience properties
517
- ref_model.trainer = self
518
-
519
- # set local properties on the model
520
- self.copy_trainer_model_properties(ref_model)
521
-
522
- # link up experiment object
523
- if self.logger is not None:
524
- ref_model.logger = self.logger
525
- self.logger.save()
526
-
527
- if self.use_ddp:
528
- dist.barrier()
529
-
530
- # set up checkpoint callback
531
- # self.configure_checkpoint_callback()
532
-
533
- # transfer data loaders from model
534
- self.get_dataloaders(ref_model)
535
-
536
- # track model now.
537
- # if cluster resets state, the model will update with the saved weights
538
- self.model = model
539
-
540
- # restore training and model before hpc call
541
- self.restore_weights(model)
542
-
543
- # when testing requested only run test and return
544
- if self.testing:
545
- self.run_evaluation(test=True)
546
- return
547
-
548
- # check if we should run validation during training
549
- self.disable_validation = self.num_val_batches == 0
550
-
551
- # run tiny validation (if validation defined)
552
- # to make sure program won't crash during val
553
- ref_model.on_sanity_check_start()
554
- ref_model.on_train_start()
555
- if not self.disable_validation and self.num_sanity_val_steps > 0:
556
- # init progress bars for validation sanity check
557
- pbar = tqdm.tqdm(desc='Validation sanity check',
558
- total=self.num_sanity_val_steps * len(self.get_val_dataloaders()),
559
- leave=False, position=2 * self.process_position,
560
- disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch')
561
- self.main_progress_bar = pbar
562
- # dummy validation progress bar
563
- self.val_progress_bar = tqdm.tqdm(disable=True)
564
-
565
- self.evaluate(model, self.get_val_dataloaders(), self.num_sanity_val_steps, self.testing)
566
-
567
- # close progress bars
568
- self.main_progress_bar.close()
569
- self.val_progress_bar.close()
570
-
571
- # init progress bar
572
- pbar = tqdm.tqdm(leave=True, position=2 * self.process_position,
573
- disable=not self.show_progress_bar, dynamic_ncols=True, unit='batch',
574
- file=sys.stdout)
575
- self.main_progress_bar = pbar
576
-
577
- # clear cache before training
578
- if self.on_gpu:
579
- torch.cuda.empty_cache()
580
-
581
- # CORE TRAINING LOOP
582
- self.train()
583
-
584
- def test(self, model):
585
- self.testing = True
586
- self.fit(model)
587
-
588
- @property
589
- def training_tqdm_dict(self):
590
- tqdm_dict = {
591
- 'step': '{}'.format(self.global_step),
592
- }
593
- tqdm_dict.update(self.tqdm_metrics)
594
- return tqdm_dict
595
-
596
- # --------------------
597
- # restore ckpt
598
- # --------------------
599
- def restore_weights(self, model):
600
- """
601
- To restore weights we have two cases.
602
- First, attempt to restore hpc weights. If successful, don't restore
603
- other weights.
604
-
605
- Otherwise, try to restore actual weights
606
- :param model:
607
- :return:
608
- """
609
- # clear cache before restore
610
- if self.on_gpu:
611
- torch.cuda.empty_cache()
612
-
613
- if self.resume_from_checkpoint is not None:
614
- self.restore(self.resume_from_checkpoint, on_gpu=self.on_gpu)
615
- else:
616
- # restore weights if same exp version
617
- self.restore_state_if_checkpoint_exists(model)
618
-
619
- # wait for all models to restore weights
620
- if self.use_ddp:
621
- # wait for all processes to catch up
622
- dist.barrier()
623
-
624
- # clear cache after restore
625
- if self.on_gpu:
626
- torch.cuda.empty_cache()
627
-
628
- def restore_state_if_checkpoint_exists(self, model):
629
- did_restore = False
630
-
631
- # do nothing if there's not dir or callback
632
- no_ckpt_callback = (self.checkpoint_callback is None) or (not self.checkpoint_callback)
633
- if no_ckpt_callback or not os.path.exists(self.checkpoint_callback.filepath):
634
- return did_restore
635
-
636
- # restore trainer state and model if there is a weight for this experiment
637
- last_steps = -1
638
- last_ckpt_name = None
639
-
640
- # find last epoch
641
- checkpoints = os.listdir(self.checkpoint_callback.filepath)
642
- for name in checkpoints:
643
- if '.ckpt' in name and not name.endswith('part'):
644
- if 'steps_' in name:
645
- steps = name.split('steps_')[1]
646
- steps = int(re.sub('[^0-9]', '', steps))
647
-
648
- if steps > last_steps:
649
- last_steps = steps
650
- last_ckpt_name = name
651
-
652
- # restore last checkpoint
653
- if last_ckpt_name is not None:
654
- last_ckpt_path = os.path.join(self.checkpoint_callback.filepath, last_ckpt_name)
655
- self.restore(last_ckpt_path, self.on_gpu)
656
- logging.info(f'model and trainer restored from checkpoint: {last_ckpt_path}')
657
- did_restore = True
658
-
659
- return did_restore
660
-
661
- def restore(self, checkpoint_path, on_gpu):
662
- checkpoint = torch.load(checkpoint_path, map_location='cpu')
663
-
664
- # load model state
665
- model = self.get_model()
666
-
667
- # load the state_dict on the model automatically
668
- model.load_state_dict(checkpoint['state_dict'], strict=False)
669
- if on_gpu:
670
- model.cuda(self.root_gpu)
671
- # load training state (affects trainer only)
672
- self.restore_training_state(checkpoint)
673
- model.global_step = self.global_step
674
- del checkpoint
675
-
676
- try:
677
- if dist.is_initialized() and dist.get_rank() > 0:
678
- return
679
- except Exception as e:
680
- print(e)
681
- return
682
-
683
- def restore_training_state(self, checkpoint):
684
- """
685
- Restore trainer state.
686
- Model will get its chance to update
687
- :param checkpoint:
688
- :return:
689
- """
690
- if self.checkpoint_callback is not None and self.checkpoint_callback is not False:
691
- self.checkpoint_callback.best = checkpoint['checkpoint_callback_best']
692
-
693
- self.global_step = checkpoint['global_step']
694
- self.current_epoch = checkpoint['epoch']
695
-
696
- if self.testing:
697
- return
698
-
699
- # restore the optimizers
700
- optimizer_states = checkpoint['optimizer_states']
701
- for optimizer, opt_state in zip(self.optimizers, optimizer_states):
702
- if optimizer is None:
703
- return
704
- optimizer.load_state_dict(opt_state)
705
-
706
- # move optimizer to GPU 1 weight at a time
707
- # avoids OOM
708
- if self.root_gpu is not None:
709
- for state in optimizer.state.values():
710
- for k, v in state.items():
711
- if isinstance(v, torch.Tensor):
712
- state[k] = v.cuda(self.root_gpu)
713
-
714
- # restore the lr schedulers
715
- lr_schedulers = checkpoint['lr_schedulers']
716
- for scheduler, lrs_state in zip(self.lr_schedulers, lr_schedulers):
717
- scheduler.load_state_dict(lrs_state)
718
-
719
- # --------------------
720
- # MODEL SAVE CHECKPOINT
721
- # --------------------
722
- def _atomic_save(self, checkpoint, filepath):
723
- """Saves a checkpoint atomically, avoiding the creation of incomplete checkpoints.
724
-
725
- This will create a temporary checkpoint with a suffix of ``.part``, then move it to the final location once
726
- saving is finished.
727
-
728
- Args:
729
- checkpoint (object): The object to save.
730
- Built to be used with the ``dump_checkpoint`` method, but can deal with anything which ``torch.save``
731
- accepts.
732
- filepath (str|pathlib.Path): The path to which the checkpoint will be saved.
733
- This points to the file that the checkpoint will be stored in.
734
- """
735
- tmp_path = str(filepath) + ".part"
736
- torch.save(checkpoint, tmp_path)
737
- os.replace(tmp_path, filepath)
738
-
739
- def save_checkpoint(self, filepath):
740
- checkpoint = self.dump_checkpoint()
741
- self._atomic_save(checkpoint, filepath)
742
-
743
- def dump_checkpoint(self):
744
-
745
- checkpoint = {
746
- 'epoch': self.current_epoch,
747
- 'global_step': self.global_step
748
- }
749
-
750
- if self.checkpoint_callback is not None and self.checkpoint_callback is not False:
751
- checkpoint['checkpoint_callback_best'] = self.checkpoint_callback.best
752
-
753
- # save optimizers
754
- optimizer_states = []
755
- for i, optimizer in enumerate(self.optimizers):
756
- if optimizer is not None:
757
- optimizer_states.append(optimizer.state_dict())
758
-
759
- checkpoint['optimizer_states'] = optimizer_states
760
-
761
- # save lr schedulers
762
- lr_schedulers = []
763
- for i, scheduler in enumerate(self.lr_schedulers):
764
- lr_schedulers.append(scheduler.state_dict())
765
-
766
- checkpoint['lr_schedulers'] = lr_schedulers
767
-
768
- # add the hparams and state_dict from the model
769
- model = self.get_model()
770
- checkpoint['state_dict'] = model.state_dict()
771
- # give the model a chance to add a few things
772
- model.on_save_checkpoint(checkpoint)
773
-
774
- return checkpoint
775
-
776
- def copy_trainer_model_properties(self, model):
777
- if isinstance(model, DP):
778
- ref_model = model.module
779
- elif isinstance(model, DDP):
780
- ref_model = model.module
781
- else:
782
- ref_model = model
783
-
784
- for m in [model, ref_model]:
785
- m.trainer = self
786
- m.on_gpu = self.on_gpu
787
- m.use_dp = self.use_dp
788
- m.use_ddp = self.use_ddp
789
- m.testing = self.testing
790
- m.single_gpu = self.single_gpu
791
-
792
- def transfer_batch_to_gpu(self, batch, gpu_id):
793
- # base case: object can be directly moved using `cuda` or `to`
794
- if callable(getattr(batch, 'cuda', None)):
795
- return batch.cuda(gpu_id, non_blocking=True)
796
-
797
- elif callable(getattr(batch, 'to', None)):
798
- return batch.to(torch.device('cuda', gpu_id), non_blocking=True)
799
-
800
- # when list
801
- elif isinstance(batch, list):
802
- for i, x in enumerate(batch):
803
- batch[i] = self.transfer_batch_to_gpu(x, gpu_id)
804
- return batch
805
-
806
- # when tuple
807
- elif isinstance(batch, tuple):
808
- batch = list(batch)
809
- for i, x in enumerate(batch):
810
- batch[i] = self.transfer_batch_to_gpu(x, gpu_id)
811
- return tuple(batch)
812
-
813
- # when dict
814
- elif isinstance(batch, dict):
815
- for k, v in batch.items():
816
- batch[k] = self.transfer_batch_to_gpu(v, gpu_id)
817
-
818
- return batch
819
-
820
- # nothing matches, return the value as is without transform
821
- return batch
822
-
823
- def set_distributed_mode(self, distributed_backend):
824
- # skip for CPU
825
- if self.num_gpus == 0:
826
- return
827
-
828
- # single GPU case
829
- # in the single gpu case we train directly on that device
831
- # without wrapping the model in DP or DDP
831
- elif self.num_gpus == 1:
832
- self.single_gpu = True
833
- self.use_dp = False
834
- self.use_ddp = False
835
- self.root_gpu = 0
836
- self.data_parallel_device_ids = [0]
837
- else:
838
- if distributed_backend is not None:
839
- self.use_dp = distributed_backend == 'dp'
840
- self.use_ddp = distributed_backend == 'ddp'
841
- elif distributed_backend is None:
842
- self.use_dp = True
843
- self.use_ddp = False
844
-
845
- logging.info(f'gpu available: {torch.cuda.is_available()}, used: {self.on_gpu}')
846
-
847
- def ddp_train(self, gpu_idx, model):
848
- """
849
- Entry point into a DDP process
850
- :param gpu_idx:
851
- :param model:
852
- :param cluster_obj:
853
- :return:
854
- """
855
- # otherwise default to node rank 0
856
- self.node_rank = 0
857
-
858
- # show progressbar only on progress_rank 0
859
- self.show_progress_bar = self.show_progress_bar and self.node_rank == 0 and gpu_idx == 0
860
-
861
- # determine which process we are and world size
862
- if self.use_ddp:
863
- self.proc_rank = self.node_rank * self.num_gpus + gpu_idx
864
- self.world_size = self.num_gpus
865
-
866
- # let the exp know the rank to avoid overwriting logs
867
- if self.logger is not None:
868
- self.logger.rank = self.proc_rank
869
-
870
- # set up server using proc 0's ip address
871
- # try to init for 20 times at max in case ports are taken
872
- # where to store ip_table
873
- model.trainer = self
874
- model.init_ddp_connection(self.proc_rank, self.world_size)
875
-
876
- # CHOOSE OPTIMIZER
877
- # allow for lr schedulers as well
878
- model.model = model.build_model()
879
- if not self.testing:
880
- self.optimizers, self.lr_schedulers = self.init_optimizers(model.configure_optimizers())
881
-
882
- # MODEL
883
- # copy model to each gpu
884
- if self.distributed_backend == 'ddp':
885
- torch.cuda.set_device(gpu_idx)
886
- model.cuda(gpu_idx)
887
-
888
- # set model properties before going into wrapper
889
- self.copy_trainer_model_properties(model)
890
-
891
- # override root GPU
892
- self.root_gpu = gpu_idx
893
-
894
- if self.distributed_backend == 'ddp':
895
- device_ids = [gpu_idx]
896
- else:
897
- device_ids = None
898
-
899
- # allow user to configure ddp
900
- model = model.configure_ddp(model, device_ids)
901
-
902
- # continue training routine
903
- self.run_pretrain_routine(model)
904
-
905
- def resolve_root_node_address(self, root_node):
906
- if '[' in root_node:
907
- name = root_node.split('[')[0]
908
- number = root_node.split(',')[0]
909
- if '-' in number:
910
- number = number.split('-')[0]
911
-
912
- number = re.sub('[^0-9]', '', number)
913
- root_node = name + number
914
-
915
- return root_node
916
-
917
- def log_metrics(self, metrics, grad_norm_dic, step=None):
918
- """Logs the metric dict passed in.
919
-
920
- :param metrics:
921
- :param grad_norm_dic:
922
- """
923
- # added metrics by Lightning for convenience
924
- metrics['epoch'] = self.current_epoch
925
-
926
- # add norms
927
- metrics.update(grad_norm_dic)
928
-
929
- # turn all tensors to scalars
930
- scalar_metrics = self.metrics_to_scalars(metrics)
931
-
932
- step = step if step is not None else self.global_step
933
- # log actual metrics
934
- if self.proc_rank == 0 and self.logger is not None:
935
- self.logger.log_metrics(scalar_metrics, step=step)
936
- self.logger.save()
937
-
938
- def add_tqdm_metrics(self, metrics):
939
- for k, v in metrics.items():
940
- if type(v) is torch.Tensor:
941
- v = v.item()
942
-
943
- self.tqdm_metrics[k] = v
944
-
945
- def metrics_to_scalars(self, metrics):
946
- new_metrics = {}
947
- for k, v in metrics.items():
948
- if isinstance(v, torch.Tensor):
949
- v = v.item()
950
-
951
- if type(v) is dict:
952
- v = self.metrics_to_scalars(v)
953
-
954
- new_metrics[k] = v
955
-
956
- return new_metrics
957
-
958
- def process_output(self, output, train=False):
959
- """Reduces output according to the training mode.
960
-
961
- Separates loss from logging and tqdm metrics
962
- :param output:
963
- :return:
964
- """
965
- # ---------------
966
- # EXTRACT CALLBACK KEYS
967
- # ---------------
968
- # all keys not progress_bar or log are candidates for callbacks
969
- callback_metrics = {}
970
- for k, v in output.items():
971
- if k not in ['progress_bar', 'log', 'hiddens']:
972
- callback_metrics[k] = v
973
-
974
- if train and self.use_dp:
975
- num_gpus = self.num_gpus
976
- callback_metrics = self.reduce_distributed_output(callback_metrics, num_gpus)
977
-
978
- for k, v in callback_metrics.items():
979
- if isinstance(v, torch.Tensor):
980
- callback_metrics[k] = v.item()
981
-
982
- # ---------------
983
- # EXTRACT PROGRESS BAR KEYS
984
- # ---------------
985
- try:
986
- progress_output = output['progress_bar']
987
-
988
- # reduce progress metrics for tqdm when using dp
989
- if train and self.use_dp:
990
- num_gpus = self.num_gpus
991
- progress_output = self.reduce_distributed_output(progress_output, num_gpus)
992
-
993
- progress_bar_metrics = progress_output
994
- except Exception:
995
- progress_bar_metrics = {}
996
-
997
- # ---------------
998
- # EXTRACT LOGGING KEYS
999
- # ---------------
1000
- # extract metrics to log to experiment
1001
- try:
1002
- log_output = output['log']
1003
-
1004
- # reduce progress metrics for tqdm when using dp
1005
- if train and self.use_dp:
1006
- num_gpus = self.num_gpus
1007
- log_output = self.reduce_distributed_output(log_output, num_gpus)
1008
-
1009
- log_metrics = log_output
1010
- except Exception:
1011
- log_metrics = {}
1012
-
1013
- # ---------------
1014
- # EXTRACT LOSS
1015
- # ---------------
1016
- # if output dict doesn't have the keyword loss
1017
- # then assume the output=loss if scalar
1018
- loss = None
1019
- if train:
1020
- try:
1021
- loss = output['loss']
1022
- except Exception:
1023
- if type(output) is torch.Tensor:
1024
- loss = output
1025
- else:
1026
- raise RuntimeError(
1027
- 'No `loss` value in the dictionary returned from `model.training_step()`.'
1028
- )
1029
-
1030
- # when using dp need to reduce the loss
1031
- if self.use_dp:
1032
- loss = self.reduce_distributed_output(loss, self.num_gpus)
1033
-
1034
- # ---------------
1035
- # EXTRACT HIDDEN
1036
- # ---------------
1037
- hiddens = output.get('hiddens')
1038
-
1039
- # use every metric passed in as a candidate for callback
1040
- callback_metrics.update(progress_bar_metrics)
1041
- callback_metrics.update(log_metrics)
1042
-
1043
- # convert tensors to numpy
1044
- for k, v in callback_metrics.items():
1045
- if isinstance(v, torch.Tensor):
1046
- callback_metrics[k] = v.item()
1047
-
1048
- return loss, progress_bar_metrics, log_metrics, callback_metrics, hiddens
1049
-
1050
- def reduce_distributed_output(self, output, num_gpus):
1051
- if num_gpus <= 1:
1052
- return output
1053
-
1054
- # when using DP, we get one output per gpu
1055
- # average outputs and return
1056
- if type(output) is torch.Tensor:
1057
- return output.mean()
1058
-
1059
- for k, v in output.items():
1060
- # recurse on nested dicts
1061
- if isinstance(output[k], dict):
1062
- output[k] = self.reduce_distributed_output(output[k], num_gpus)
1063
-
1064
- # do nothing when there's a scalar
1065
- elif isinstance(output[k], torch.Tensor) and output[k].dim() == 0:
1066
- pass
1067
-
1068
- # reduce only metrics that have the same number of gpus
1069
- elif output[k].size(0) == num_gpus:
1070
- reduced = torch.mean(output[k])
1071
- output[k] = reduced
1072
- return output
1073
-
1074
- def clip_gradients(self):
1075
- if self.gradient_clip_val > 0:
1076
- model = self.get_model()
1077
- torch.nn.utils.clip_grad_norm_(model.parameters(), self.gradient_clip_val)
1078
-
1079
- def print_nan_gradients(self):
1080
- model = self.get_model()
1081
- for param in model.parameters():
1082
- if (param.grad is not None) and torch.isnan(param.grad.float()).any():
1083
- logging.info(param, param.grad)
1084
-
1085
- def configure_accumulated_gradients(self, accumulate_grad_batches):
1086
- self.accumulate_grad_batches = None
1087
-
1088
- if isinstance(accumulate_grad_batches, dict):
1089
- self.accumulation_scheduler = GradientAccumulationScheduler(accumulate_grad_batches)
1090
- elif isinstance(accumulate_grad_batches, int):
1091
- schedule = {1: accumulate_grad_batches}
1092
- self.accumulation_scheduler = GradientAccumulationScheduler(schedule)
1093
- else:
1094
- raise TypeError("Gradient accumulation supports only int and dict types")
1095
-
1096
- def get_dataloaders(self, model):
1097
- if not self.testing:
1098
- self.init_train_dataloader(model)
1099
- self.init_val_dataloader(model)
1100
- else:
1101
- self.init_test_dataloader(model)
1102
-
1103
- if self.use_ddp:
1104
- dist.barrier()
1105
- if not self.testing:
1106
- self.get_train_dataloader()
1107
- self.get_val_dataloaders()
1108
- else:
1109
- self.get_test_dataloaders()
1110
-
1111
- def init_train_dataloader(self, model):
1112
- self.first_epoch = True
1113
- self.get_train_dataloader = model.train_dataloader
1114
- if isinstance(self.get_train_dataloader(), torch.utils.data.DataLoader):
1115
- self.num_training_batches = len(self.get_train_dataloader())
1116
- self.num_training_batches = int(self.num_training_batches)
1117
- else:
1118
- self.num_training_batches = float('inf')
1119
- self.is_iterable_train_dataloader = True
1120
- if isinstance(self.val_check_interval, int):
1121
- self.val_check_batch = self.val_check_interval
1122
- else:
1123
- self._percent_range_check('val_check_interval')
1124
- self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
1125
- self.val_check_batch = max(1, self.val_check_batch)
1126
-
1127
- def init_val_dataloader(self, model):
1128
- self.get_val_dataloaders = model.val_dataloader
1129
- self.num_val_batches = 0
1130
- if self.get_val_dataloaders() is not None:
1131
- if isinstance(self.get_val_dataloaders()[0], torch.utils.data.DataLoader):
1132
- self.num_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders())
1133
- self.num_val_batches = int(self.num_val_batches)
1134
- else:
1135
- self.num_val_batches = float('inf')
1136
-
1137
- def init_test_dataloader(self, model):
1138
- self.get_test_dataloaders = model.test_dataloader
1139
- if self.get_test_dataloaders() is not None:
1140
- if isinstance(self.get_test_dataloaders()[0], torch.utils.data.DataLoader):
1141
- self.num_test_batches = sum(len(dataloader) for dataloader in self.get_test_dataloaders())
1142
- self.num_test_batches = int(self.num_test_batches)
1143
- else:
1144
- self.num_test_batches = float('inf')
1145
-
1146
- def evaluate(self, model, dataloaders, max_batches, test=False):
1147
- """Run evaluation code.
1148
-
1149
- :param model: PT model
1150
- :param dataloaders: list of PT dataloaders
1151
- :param max_batches: Scalar
1152
- :param test: boolean
1153
- :return:
1154
- """
1155
- # enable eval mode
1156
- model.zero_grad()
1157
- model.eval()
1158
-
1159
- # copy properties for forward overrides
1160
- self.copy_trainer_model_properties(model)
1161
-
1162
- # disable gradients to save memory
1163
- torch.set_grad_enabled(False)
1164
-
1165
- if test:
1166
- self.get_model().test_start()
1167
- # bookkeeping
1168
- outputs = []
1169
-
1170
- # run evaluation
1171
- for dataloader_idx, dataloader in enumerate(dataloaders):
1172
- dl_outputs = []
1173
- for batch_idx, batch in enumerate(dataloader):
1174
-
1175
- if batch is None: # pragma: no cover
1176
- continue
1177
-
1178
- # stop short when on fast_dev_run (sets max_batch=1)
1179
- if batch_idx >= max_batches:
1180
- break
1181
-
1182
- # -----------------
1183
- # RUN EVALUATION STEP
1184
- # -----------------
1185
- output = self.evaluation_forward(model,
1186
- batch,
1187
- batch_idx,
1188
- dataloader_idx,
1189
- test)
1190
-
1191
- # track outputs for collation
1192
- dl_outputs.append(output)
1193
-
1194
- # batch done
1195
- if test:
1196
- self.test_progress_bar.update(1)
1197
- else:
1198
- self.val_progress_bar.update(1)
1199
- outputs.append(dl_outputs)
1200
-
1201
- # with a single dataloader don't pass an array
1202
- if len(dataloaders) == 1:
1203
- outputs = outputs[0]
1204
-
1205
- # give model a chance to do something with the outputs (and method defined)
1206
- model = self.get_model()
1207
- if test:
1208
- eval_results_ = model.test_end(outputs)
1209
- else:
1210
- eval_results_ = model.validation_end(outputs)
1211
- eval_results = eval_results_
1212
-
1213
- # enable train mode again
1214
- model.train()
1215
-
1216
- # re-enable gradients (they were disabled for evaluation)
1217
- torch.set_grad_enabled(True)
1218
-
1219
- return eval_results
1220
-
1221
- def run_evaluation(self, test=False):
1222
- # when testing make sure user defined a test step
1223
- model = self.get_model()
1224
- model.on_pre_performance_check()
1225
-
1226
- # select dataloaders
1227
- if test:
1228
- dataloaders = self.get_test_dataloaders()
1229
- max_batches = self.num_test_batches
1230
- else:
1231
- # val
1232
- dataloaders = self.get_val_dataloaders()
1233
- max_batches = self.num_val_batches
1234
-
1235
- # init validation or test progress bar
1236
- # main progress bar will already be closed when testing so initial position is free
1237
- position = 2 * self.process_position + (not test)
1238
- desc = 'Testing' if test else 'Validating'
1239
- pbar = tqdm.tqdm(desc=desc, total=max_batches, leave=test, position=position,
1240
- disable=not self.show_progress_bar, dynamic_ncols=True,
1241
- unit='batch', file=sys.stdout)
1242
- setattr(self, f'{"test" if test else "val"}_progress_bar', pbar)
1243
-
1244
- # run evaluation
1245
- eval_results = self.evaluate(self.model,
1246
- dataloaders,
1247
- max_batches,
1248
- test)
1249
- if eval_results is not None:
1250
- _, prog_bar_metrics, log_metrics, callback_metrics, _ = self.process_output(
1251
- eval_results)
1252
-
1253
- # add metrics to prog bar
1254
- self.add_tqdm_metrics(prog_bar_metrics)
1255
-
1256
- # log metrics
1257
- self.log_metrics(log_metrics, {})
1258
-
1259
- # track metrics for callbacks
1260
- self.callback_metrics.update(callback_metrics)
1261
-
1262
- # hook
1263
- model.on_post_performance_check()
1264
-
1265
- # add model specific metrics
1266
- tqdm_metrics = self.training_tqdm_dict
1267
- if not test:
1268
- self.main_progress_bar.set_postfix(**tqdm_metrics)
1269
-
1270
- # close progress bar
1271
- if test:
1272
- self.test_progress_bar.close()
1273
- else:
1274
- self.val_progress_bar.close()
1275
-
1276
- # model checkpointing
1277
- if self.proc_rank == 0 and self.checkpoint_callback is not None and not test:
1278
- self.checkpoint_callback.on_epoch_end(epoch=self.current_epoch,
1279
- logs=self.callback_metrics)
1280
-
1281
- def evaluation_forward(self, model, batch, batch_idx, dataloader_idx, test=False):
1282
- # make dataloader_idx arg in validation_step optional
1283
- args = [batch, batch_idx]
1284
-
1285
- if test and len(self.get_test_dataloaders()) > 1:
1286
- args.append(dataloader_idx)
1287
-
1288
- elif not test and len(self.get_val_dataloaders()) > 1:
1289
- args.append(dataloader_idx)
1290
-
1291
- # handle DP, DDP forward
1292
- if self.use_ddp or self.use_dp:
1293
- output = model(*args)
1294
- return output
1295
-
1296
- # single GPU
1297
- if self.single_gpu:
1298
- # for single GPU put inputs on gpu manually
1299
- root_gpu = 0
1300
- if isinstance(self.data_parallel_device_ids, list):
1301
- root_gpu = self.data_parallel_device_ids[0]
1302
- batch = self.transfer_batch_to_gpu(batch, root_gpu)
1303
- args[0] = batch
1304
-
1305
- # CPU
1306
- if test:
1307
- output = model.test_step(*args)
1308
- else:
1309
- output = model.validation_step(*args)
1310
-
1311
- return output
1312
-
1313
- def train(self):
1314
- model = self.get_model()
1315
- # run all epochs
1316
- for epoch in range(self.current_epoch, 1000000):
1317
- # set seed for distributed sampler (enables shuffling for each epoch)
1318
- if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'):
1319
- self.get_train_dataloader().sampler.set_epoch(epoch)
1320
-
1321
- # get model
1322
- model = self.get_model()
1323
-
1324
- # update training progress in trainer and model
1325
- model.current_epoch = epoch
1326
- self.current_epoch = epoch
1327
-
1328
- total_val_batches = 0
1329
- if not self.disable_validation:
1330
- # val can be checked multiple times in epoch
1331
- is_val_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0
1332
- val_checks_per_epoch = self.num_training_batches // self.val_check_batch
1333
- val_checks_per_epoch = val_checks_per_epoch if is_val_epoch else 0
1334
- total_val_batches = self.num_val_batches * val_checks_per_epoch
1335
-
1336
- # total batches includes multiple val checks
1337
- self.total_batches = self.num_training_batches + total_val_batches
1338
- self.batch_loss_value = 0 # accumulated grads
1339
-
1340
- if self.is_iterable_train_dataloader:
1341
- # for iterable train loader, the progress bar never ends
1342
- num_iterations = None
1343
- else:
1344
- num_iterations = self.total_batches
1345
-
1346
- # reset progress bar
1347
- # .reset() doesn't work on disabled progress bar so we should check
1348
- desc = f'Epoch {epoch + 1}' if not self.is_iterable_train_dataloader else ''
1349
- self.main_progress_bar.set_description(desc)
1350
-
1351
- # update gradient accumulation according to the accumulation_scheduler
1352
- self.accumulation_scheduler.on_epoch_begin(epoch, self)
1353
-
1354
- # -----------------
1355
- # RUN TRAINING EPOCH
1356
- # -----------------
1357
- self.run_training_epoch()
1358
-
1359
- # update LR schedulers
1360
- if self.lr_schedulers is not None:
1361
- for lr_scheduler in self.lr_schedulers:
1362
- lr_scheduler.step(epoch=self.current_epoch)
1363
-
1364
- self.main_progress_bar.close()
1365
-
1366
- model.on_train_end()
1367
-
1368
- if self.logger is not None:
1369
- self.logger.finalize("success")
1370
-
1371
- def run_training_epoch(self):
1372
- # before epoch hook
1373
- if self.is_function_implemented('on_epoch_start'):
1374
- model = self.get_model()
1375
- model.on_epoch_start()
1376
-
1377
- # run epoch
1378
- for batch_idx, batch in enumerate(self.get_train_dataloader()):
1379
- # stop epoch if we limited the number of training batches
1380
- if batch_idx >= self.num_training_batches:
1381
- break
1382
-
1383
- self.batch_idx = batch_idx
1384
-
1385
- model = self.get_model()
1386
- model.global_step = self.global_step
1387
-
1388
- # ---------------
1389
- # RUN TRAIN STEP
1390
- # ---------------
1391
- output = self.run_training_batch(batch, batch_idx)
1392
- batch_result, grad_norm_dic, batch_step_metrics = output
1393
-
1394
- # when returning -1 from train_step, we end epoch early
1395
- early_stop_epoch = batch_result == -1
1396
-
1397
- # ---------------
1398
- # RUN VAL STEP
1399
- # ---------------
1400
- should_check_val = (
1401
- not self.disable_validation and self.global_step % self.val_check_batch == 0 and not self.first_epoch)
1402
- self.first_epoch = False
1403
-
1404
- if should_check_val:
1405
- self.run_evaluation(test=self.testing)
1406
-
1407
- # when logs should be saved
1408
- should_save_log = (batch_idx + 1) % self.log_save_interval == 0 or early_stop_epoch
1409
- if should_save_log:
1410
- if self.proc_rank == 0 and self.logger is not None:
1411
- self.logger.save()
1412
-
1413
- # when metrics should be logged
1414
- should_log_metrics = batch_idx % self.row_log_interval == 0 or early_stop_epoch
1415
- if should_log_metrics:
1416
- # logs user requested information to logger
1417
- self.log_metrics(batch_step_metrics, grad_norm_dic)
1418
-
1419
- self.global_step += 1
1420
- self.total_batch_idx += 1
1421
-
1422
- # end epoch early
1423
- # stop when the flag is changed or we've gone past the amount
1424
- # requested in the batches
1425
- if early_stop_epoch:
1426
- break
1427
- if self.global_step > self.max_updates:
1428
- print("| Training end..")
1429
- exit()
1430
-
1431
- # epoch end hook
1432
- if self.is_function_implemented('on_epoch_end'):
1433
- model = self.get_model()
1434
- model.on_epoch_end()
1435
-
1436
- def run_training_batch(self, batch, batch_idx):
1437
- # track grad norms
1438
- grad_norm_dic = {}
1439
-
1440
- # track all metrics for callbacks
1441
- all_callback_metrics = []
1442
-
1443
- # track metrics to log
1444
- all_log_metrics = []
1445
-
1446
- if batch is None:
1447
- return 0, grad_norm_dic, {}
1448
-
1449
- # hook
1450
- if self.is_function_implemented('on_batch_start'):
1451
- model_ref = self.get_model()
1452
- response = model_ref.on_batch_start(batch)
1453
-
1454
- if response == -1:
1455
- return -1, grad_norm_dic, {}
1456
-
1457
- splits = [batch]
1458
- self.hiddens = None
1459
- for split_idx, split_batch in enumerate(splits):
1460
- self.split_idx = split_idx
1461
-
1462
- # call training_step once per optimizer
1463
- for opt_idx, optimizer in enumerate(self.optimizers):
1464
- if optimizer is None:
1465
- continue
1466
- # make sure only the gradients of the current optimizer's parameters are calculated
1467
- # in the training step to prevent dangling gradients in multiple-optimizer setup.
1468
- if len(self.optimizers) > 1:
1469
- for param in self.get_model().parameters():
1470
- param.requires_grad = False
1471
- for group in optimizer.param_groups:
1472
- for param in group['params']:
1473
- param.requires_grad = True
1474
-
1475
- # wrap the forward step in a closure so second order methods work
1476
- def optimizer_closure():
1477
- # forward pass
1478
- output = self.training_forward(
1479
- split_batch, batch_idx, opt_idx, self.hiddens)
1480
-
1481
- closure_loss = output[0]
1482
- progress_bar_metrics = output[1]
1483
- log_metrics = output[2]
1484
- callback_metrics = output[3]
1485
- self.hiddens = output[4]
1486
- if closure_loss is None:
1487
- return None
1488
-
1489
- # accumulate loss
1490
- # (if accumulate_grad_batches = 1 no effect)
1491
- closure_loss = closure_loss / self.accumulate_grad_batches
1492
-
1493
- # backward pass
1494
- model_ref = self.get_model()
1495
- if closure_loss.requires_grad:
1496
- model_ref.backward(closure_loss, optimizer)
1497
-
1498
- # track metrics for callbacks
1499
- all_callback_metrics.append(callback_metrics)
1500
-
1501
- # track progress bar metrics
1502
- self.add_tqdm_metrics(progress_bar_metrics)
1503
- all_log_metrics.append(log_metrics)
1504
-
1505
- # insert after step hook
1506
- if self.is_function_implemented('on_after_backward'):
1507
- model_ref = self.get_model()
1508
- model_ref.on_after_backward()
1509
-
1510
- return closure_loss
1511
-
1512
- # calculate loss
1513
- loss = optimizer_closure()
1514
- if loss is None:
1515
- continue
1516
-
1517
- # nan grads
1518
- if self.print_nan_grads:
1519
- self.print_nan_gradients()
1520
-
1521
- # track total loss for logging (avoid mem leaks)
1522
- self.batch_loss_value += loss.item()
1523
-
1524
- # gradient update with accumulated gradients
1525
- if (self.batch_idx + 1) % self.accumulate_grad_batches == 0:
1526
-
1527
- # track gradient norms when requested
1528
- if batch_idx % self.row_log_interval == 0:
1529
- if self.track_grad_norm > 0:
1530
- model = self.get_model()
1531
- grad_norm_dic = model.grad_norm(
1532
- self.track_grad_norm)
1533
-
1534
- # clip gradients
1535
- self.clip_gradients()
1536
-
1537
- # calls .step(), .zero_grad()
1538
- # override function to modify this behavior
1539
- model = self.get_model()
1540
- model.optimizer_step(self.current_epoch, batch_idx, optimizer, opt_idx)
1541
-
1542
- # calculate running loss for display
1543
- self.running_loss.append(self.batch_loss_value)
1544
- self.batch_loss_value = 0
1545
- self.avg_loss = np.mean(self.running_loss[-100:])
1546
-
1547
- # activate batch end hook
1548
- if self.is_function_implemented('on_batch_end'):
1549
- model = self.get_model()
1550
- model.on_batch_end()
1551
-
1552
- # update progress bar
1553
- self.main_progress_bar.update(1)
1554
- self.main_progress_bar.set_postfix(**self.training_tqdm_dict)
1555
-
1556
- # collapse all metrics into one dict
1557
- all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()}
1558
-
1559
- # track all metrics for callbacks
1560
- self.callback_metrics.update({k: v for d in all_callback_metrics for k, v in d.items()})
1561
-
1562
- return 0, grad_norm_dic, all_log_metrics
1563
-
1564
- def training_forward(self, batch, batch_idx, opt_idx, hiddens):
1565
- """
1566
- Handle forward for each training case (distributed, single gpu, etc...)
1567
- :param batch:
1568
- :param batch_idx:
1569
- :return:
1570
- """
1571
- # ---------------
1572
- # FORWARD
1573
- # ---------------
1574
- # enable not needing to add opt_idx to training_step
1575
- args = [batch, batch_idx, opt_idx]
1576
-
1577
- # distributed forward
1578
- if self.use_ddp or self.use_dp:
1579
- output = self.model(*args)
1580
- # single GPU forward
1581
- elif self.single_gpu:
1582
- gpu_id = 0
1583
- if isinstance(self.data_parallel_device_ids, list):
1584
- gpu_id = self.data_parallel_device_ids[0]
1585
- batch = self.transfer_batch_to_gpu(copy.copy(batch), gpu_id)
1586
- args[0] = batch
1587
- output = self.model.training_step(*args)
1588
- # CPU forward
1589
- else:
1590
- output = self.model.training_step(*args)
1591
-
1592
- # allow any mode to define training_end
1593
- model_ref = self.get_model()
1594
- output_ = model_ref.training_end(output)
1595
- if output_ is not None:
1596
- output = output_
1597
-
1598
- # format and reduce outputs accordingly
1599
- output = self.process_output(output, train=True)
1600
-
1601
- return output
1602
-
1603
- # ---------------
1604
- # Utils
1605
- # ---------------
1606
- def is_function_implemented(self, f_name):
1607
- model = self.get_model()
1608
- f_op = getattr(model, f_name, None)
1609
- return callable(f_op)
1610
-
1611
- def _percent_range_check(self, name):
1612
- value = getattr(self, name)
1613
- msg = f"`{name}` must lie in the range [0.0, 1.0], but got {value:.3f}."
1614
- if name == "val_check_interval":
1615
- msg += " If you want to disable validation set `val_percent_check` to 0.0 instead."
1616
-
1617
- if not 0. <= value <= 1.:
1618
- raise ValueError(msg)
 
spaces/AIGC-Audio/AudioGPT/audio_to_text/__init__.py DELETED
File without changes
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/text/encoding.py DELETED
@@ -1,9 +0,0 @@
1
- import chardet
2
-
3
-
4
- def get_encoding(file):
5
- with open(file, 'rb') as f:
6
- encoding = chardet.detect(f.read())['encoding']
7
- if encoding == 'GB2312':  # GB18030 is a superset of GB2312 and decodes it safely
8
- encoding = 'GB18030'
9
- return encoding
 
spaces/AIZ2H/04-Gradio-SOTA-Seq2Seq-AutoQA/app.py DELETED
@@ -1,51 +0,0 @@
1
- import gradio as gr
2
- from qasrl_model_pipeline import QASRL_Pipeline
3
-
4
- models = ["kleinay/qanom-seq2seq-model-baseline",
5
- "kleinay/qanom-seq2seq-model-joint"]
6
- pipelines = {model: QASRL_Pipeline(model) for model in models}
7
-
8
-
9
- description = f"""Using Seq2Seq T5 model which takes a sequence of items and outputs another sequence this model generates Questions and Answers (QA) with focus on Semantic Role Labeling (SRL)"""
10
- title="Seq2Seq T5 Questions and Answers (QA) with Semantic Role Labeling (SRL)"
11
- examples = [[models[0], "In March and April the patient <p> had two falls. One was related to asthma, heart palpitations. The second was due to syncope and post covid vaccination dizziness during exercise. The patient is now getting an EKG. Former EKG had shown that there was a bundle branch block. Patient had some uncontrolled immune system reactions like anaphylaxis and shortness of breath.", True, "fall"],
12
- [models[1], "In March and April the patient had two falls. One was related to asthma, heart palpitations. The second was due to syncope and post covid vaccination dizziness during exercise. The patient is now getting an EKG. Former EKG had shown that there was a bundle branch block. Patient had some uncontrolled immune system reactions <p> like anaphylaxis and shortness of breath.", True, "reactions"],
13
- [models[0], "In March and April the patient had two falls. One was related <p> to asthma, heart palpitations. The second was due to syncope and post covid vaccination dizziness during exercise. The patient is now getting an EKG. Former EKG had shown that there was a bundle branch block. Patient had some uncontrolled immune system reactions like anaphylaxis and shortness of breath.", True, "relate"],
14
- [models[1], "In March and April the patient <p> had two falls. One was related to asthma, heart palpitations. The second was due to syncope and post covid vaccination dizziness during exercise. The patient is now getting an EKG. Former EKG had shown that there was a bundle branch block. Patient had some uncontrolled immune system reactions like anaphylaxis and shortness of breath.", False, "fall"]]
15
-
16
- input_sent_box_label = "Insert sentence here. Mark the predicate by adding the token '<p>' before it."
17
- verb_form_inp_placeholder = "e.g. 'decide' for the nominalization 'decision', 'teach' for 'teacher', etc."
18
- links = """<p style='text-align: center'>
19
- <a href='https://www.qasrl.org' target='_blank'>QASRL Website</a> | <a href='https://huggingface.co/kleinay/qanom-seq2seq-model-baseline' target='_blank'>Model Repo at Huggingface Hub</a>
20
- </p>"""
21
- def call(model_name, sentence, is_nominal, verb_form):
22
- predicate_marker="<p>"
23
- if predicate_marker not in sentence:
24
- raise ValueError("You must highlight one word of the sentence as a predicate using preceding '<p>'.")
25
-
26
- if not verb_form:
27
- if is_nominal:
28
- raise ValueError("You should provide the verbal form of the nominalization")
29
-
30
- toks = sentence.split(" ")
31
- pred_idx = toks.index(predicate_marker)
32
- predicate = toks[pred_idx+1]
33
- verb_form=predicate
34
- pipeline = pipelines[model_name]
35
- pipe_out = pipeline([sentence],
36
- predicate_marker=predicate_marker,
37
- predicate_type="nominal" if is_nominal else "verbal",
38
- verb_form=verb_form)[0]
39
- return pipe_out["QAs"], pipe_out["generated_text"]
40
- iface = gr.Interface(fn=call,
41
- inputs=[gr.inputs.Radio(choices=models, default=models[0], label="Model"),
42
- gr.inputs.Textbox(placeholder=input_sent_box_label, label="Sentence", lines=4),
43
- gr.inputs.Checkbox(default=True, label="Is Nominalization?"),
44
- gr.inputs.Textbox(placeholder=verb_form_inp_placeholder, label="Verbal form (for nominalizations)", default='')],
45
- outputs=[gr.outputs.JSON(label="Model Output - QASRL"), gr.outputs.Textbox(label="Raw output sequence")],
46
- title=title,
47
- description=description,
48
- article=links,
49
- examples=examples )
50
-
51
- iface.launch()
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py DELETED
@@ -1,12 +0,0 @@
1
- _base_ = 'yolov5_s-v61_syncbn_8xb16-300e_coco.py'
2
-
3
- # fast means faster training speed,
4
- # but less flexibility for multitasking
5
- model = dict(
6
- data_preprocessor=dict(
7
- type='YOLOv5DetDataPreprocessor',
8
- mean=[0., 0., 0.],
9
- std=[255., 255., 255.],
10
- bgr_to_rgb=True))
11
-
12
- train_dataloader = dict(collate_fn=dict(type='yolov5_collate'))
 
spaces/Abhilashvj/planogram-compliance/utils/loggers/comet/README.md DELETED
@@ -1,256 +0,0 @@
1
- <img src="https://cdn.comet.ml/img/notebook_logo.png">
2
-
3
- # YOLOv5 with Comet
4
-
5
- This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet2)
6
-
7
- # About Comet
8
-
9
- Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models.
10
-
11
- Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)!
12
- Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!
13
-
14
- # Getting Started
15
-
16
- ## Install Comet
17
-
18
- ```shell
19
- pip install comet_ml
20
- ```
21
-
22
- ## Configure Comet Credentials
23
-
24
- There are two ways to configure Comet with YOLOv5.
25
-
26
- You can either set your credentials through environment variables
27
-
28
- **Environment Variables**
29
-
30
- ```shell
31
- export COMET_API_KEY=<Your Comet API Key>
32
- export COMET_PROJECT_NAME=<Your Comet Project Name> # This will default to 'yolov5'
33
- ```
34
-
35
- Or create a `.comet.config` file in your working directory and set your credentials there.
36
-
37
- **Comet Configuration File**
38
-
39
- ```
40
- [comet]
41
- api_key=<Your Comet API Key>
42
- project_name=<Your Comet Project Name> # This will default to 'yolov5'
43
- ```
44
-
45
- ## Run the Training Script
46
-
47
- ```shell
48
- # Train YOLOv5s on COCO128 for 5 epochs
49
- python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt
50
- ```
51
-
52
- That's it! Comet will automatically log your hyperparameters, command line arguments, and training and validation metrics. You can visualize and analyze your runs in the Comet UI
53
-
54
- <img width="1920" alt="yolo-ui" src="https://user-images.githubusercontent.com/26833433/202851203-164e94e1-2238-46dd-91f8-de020e9d6b41.png">
55
-
56
- # Try out an Example!
57
- Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)
58
-
59
- Or better yet, try it out yourself in this Colab Notebook
60
-
61
- [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)
62
-
63
- # Log automatically
64
-
65
- By default, Comet will log the following items
66
-
67
- ## Metrics
68
- - Box Loss, Object Loss, Classification Loss for the training and validation data
69
- - mAP_0.5, mAP_0.5:0.95 metrics for the validation data.
70
- - Precision and Recall for the validation data
71
-
72
- ## Parameters
73
-
74
- - Model Hyperparameters
75
- - All parameters passed through the command line options
76
-
77
- ## Visualizations
78
-
79
- - Confusion Matrix of the model predictions on the validation data
80
- - Plots for the PR and F1 curves across all classes
81
- - Correlogram of the Class Labels
82
-
83
- # Configure Comet Logging
84
-
85
- Comet can be configured to log additional data either through command line flags passed to the training script
86
- or through environment variables.
87
-
88
- ```shell
89
- export COMET_MODE=online # Set whether to run Comet in 'online' or 'offline' mode. Defaults to online
90
- export COMET_MODEL_NAME=<your model name> #Set the name for the saved model. Defaults to yolov5
91
- export COMET_LOG_CONFUSION_MATRIX=false # Set to disable logging a Comet Confusion Matrix. Defaults to true
92
- export COMET_MAX_IMAGE_UPLOADS=<number of allowed images to upload to Comet> # Controls how many total image predictions to log to Comet. Defaults to 100.
93
- export COMET_LOG_PER_CLASS_METRICS=true # Set to log evaluation metrics for each detected class at the end of training. Defaults to false
94
- export COMET_DEFAULT_CHECKPOINT_FILENAME=<your checkpoint filename> # Set this if you would like to resume training from a different checkpoint. Defaults to 'last.pt'
95
- export COMET_LOG_BATCH_LEVEL_METRICS=true # Set this if you would like to log training metrics at the batch level. Defaults to false.
96
- export COMET_LOG_PREDICTIONS=true # Set this to false to disable logging model predictions
97
- ```
98
-
99
- ## Logging Checkpoints with Comet
100
-
101
- Logging Models to Comet is disabled by default. To enable it, pass the `save-period` argument to the training script. This will save the
102
- logged checkpoints to Comet based on the interval value provided by `save-period`
103
-
104
- ```shell
105
- python train.py \
106
- --img 640 \
107
- --batch 16 \
108
- --epochs 5 \
109
- --data coco128.yaml \
110
- --weights yolov5s.pt \
111
- --save-period 1
112
- ```
113
-
114
- ## Logging Model Predictions
115
-
116
- By default, model predictions (images, ground truth labels and bounding boxes) will be logged to Comet.
117
-
118
- You can control the frequency of logged predictions and the associated images by passing the `bbox_interval` command line argument. Predictions can be visualized using Comet's Object Detection Custom Panel. This frequency corresponds to every Nth batch of data per epoch. In the example below, we are logging every 2nd batch of data for each epoch.
119
-
120
- **Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly.
121
-
122
- Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)
123
-
124
-
125
- ```shell
126
- python train.py \
127
- --img 640 \
128
- --batch 16 \
129
- --epochs 5 \
130
- --data coco128.yaml \
131
- --weights yolov5s.pt \
132
- --bbox_interval 2
133
- ```
134
-
135
- ### Controlling the number of Prediction Images logged to Comet
136
-
137
- When logging predictions from YOLOv5, Comet will log the images associated with each set of predictions. By default a maximum of 100 validation images are logged. You can increase or decrease this number using the `COMET_MAX_IMAGE_UPLOADS` environment variable.
138
-
139
- ```shell
140
- env COMET_MAX_IMAGE_UPLOADS=200 python train.py \
141
- --img 640 \
142
- --batch 16 \
143
- --epochs 5 \
144
- --data coco128.yaml \
145
- --weights yolov5s.pt \
146
- --bbox_interval 1
147
- ```
148
-
149
- ### Logging Class Level Metrics
150
-
151
- Use the `COMET_LOG_PER_CLASS_METRICS` environment variable to log mAP, precision, recall, f1 for each class.
152
-
153
- ```shell
154
- env COMET_LOG_PER_CLASS_METRICS=true python train.py \
155
- --img 640 \
156
- --batch 16 \
157
- --epochs 5 \
158
- --data coco128.yaml \
159
- --weights yolov5s.pt
160
- ```
161
-
162
- ## Uploading a Dataset to Comet Artifacts
163
-
164
- If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github), you can do so using the `upload_dataset` flag.
165
-
166
- The dataset should be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file (see the sketch below).
167
-
168
- ```shell
169
- python train.py \
170
- --img 640 \
171
- --batch 16 \
172
- --epochs 5 \
173
- --data coco128.yaml \
174
- --weights yolov5s.pt \
175
- --upload_dataset
176
- ```
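For reference, here is a minimal sketch of what such a dataset config `yaml` might look like. The field names mirror `coco128.yaml`, but the paths and class names below are placeholders for illustration, not values from this repository.

```
# illustrative dataset config -- replace paths and names with your own
path: ../datasets/my_dataset   # dataset root directory
train: images/train            # training images, relative to 'path'
val: images/val                # validation images, relative to 'path'

nc: 2                          # number of classes
names: ['class_0', 'class_1']  # class names
```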
177
-
178
- You can find the uploaded dataset in the Artifacts tab in your Comet Workspace
179
- <img width="1073" alt="artifact-1" src="https://user-images.githubusercontent.com/7529846/186929193-162718bf-ec7b-4eb9-8c3b-86b3763ef8ea.png">
180
-
181
- You can preview the data directly in the Comet UI.
182
- <img width="1082" alt="artifact-2" src="https://user-images.githubusercontent.com/7529846/186929215-432c36a9-c109-4eb0-944b-84c2786590d6.png">
183
-
184
- Artifacts are versioned and also support adding metadata about the dataset. Comet will automatically log the metadata from your dataset `yaml` file
185
- <img width="963" alt="artifact-3" src="https://user-images.githubusercontent.com/7529846/186929256-9d44d6eb-1a19-42de-889a-bcbca3018f2e.png">
186
-
187
- ### Using a saved Artifact
188
-
189
- If you would like to use a dataset from Comet Artifacts, set the `path` variable in your dataset `yaml` file to point to the following Artifact resource URL.
190
-
191
- ```
192
- # contents of artifact.yaml file
193
- path: "comet://<workspace name>/<artifact name>:<artifact version or alias>"
194
- ```
195
- Then pass this file to your training script in the following way
196
-
197
- ```shell
198
- python train.py \
199
- --img 640 \
200
- --batch 16 \
201
- --epochs 5 \
202
- --data artifact.yaml \
203
- --weights yolov5s.pt
204
- ```
205
-
206
- Artifacts also allow you to track the lineage of data as it flows through your Experimentation workflow. Here you can see a graph that shows you all the experiments that have used your uploaded dataset.
207
- <img width="1391" alt="artifact-4" src="https://user-images.githubusercontent.com/7529846/186929264-4c4014fa-fe51-4f3c-a5c5-f6d24649b1b4.png">
208
-
209
- ## Resuming a Training Run
210
-
211
- If your training run is interrupted for any reason, e.g. disrupted internet connection, you can resume the run using the `resume` flag and the Comet Run Path.
212
-
213
- The Run Path has the following format `comet://<your workspace name>/<your project name>/<experiment id>`.
214
-
215
- This will restore the run to its state before the interruption, which includes restoring the model from a checkpoint, restoring all hyperparameters and training arguments and downloading Comet dataset Artifacts if they were used in the original run. The resumed run will continue logging to the existing Experiment in the Comet UI
216
-
217
- ```shell
218
- python train.py \
219
- --resume "comet://<your run path>"
220
- ```
221
-
222
- ## Hyperparameter Search with the Comet Optimizer
223
-
224
- YOLOv5 is also integrated with Comet's Optimizer, making is simple to visualie hyperparameter sweeps in the Comet UI.
225
-
226
- ### Configuring an Optimizer Sweep
227
-
228
- To configure the Comet Optimizer, you will have to create a JSON file with the information about the sweep. An example file has been provided in `utils/loggers/comet/optimizer_config.json`.
-
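- For orientation, a sweep config of roughly this shape is expected (a minimal sketch only: the algorithm, metric name, and parameter ranges below are illustrative assumptions, and the bundled `optimizer_config.json` remains the authoritative reference):
-
- ```json
- {
-   "algorithm": "random",
-   "spec": {
-     "maxCombo": 10,
-     "objective": "maximize",
-     "metric": "metrics/mAP_0.5"
-   },
-   "parameters": {
-     "lr0": {"type": "discrete", "values": [0.001, 0.005, 0.01]},
-     "momentum": {"type": "float", "min": 0.8, "max": 0.98}
-   },
-   "name": "yolov5-sweep"
- }
- ```
-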
- ```shell
- python utils/loggers/comet/hpo.py \
- --comet_optimizer_config "utils/loggers/comet/optimizer_config.json"
- ```
-
- The `hpo.py` script accepts the same arguments as `train.py`. If you wish to pass additional arguments to your sweep, simply add them after the script.
-
- ```shell
- python utils/loggers/comet/hpo.py \
- --comet_optimizer_config "utils/loggers/comet/optimizer_config.json" \
- --save-period 1 \
- --bbox_interval 1
- ```
-
- ### Running a Sweep in Parallel
-
- ```shell
- comet optimizer -j <set number of workers> utils/loggers/comet/hpo.py \
- utils/loggers/comet/optimizer_config.json
- ```
-
- ### Visualizing Results
-
- Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github).
-
- <img width="1626" alt="hyperparameter-yolo" src="https://user-images.githubusercontent.com/7529846/186914869-7dc1de14-583f-4323-967b-c9a66a29e495.png">
 
spaces/AfrodreamsAI/afrodreams/INSTALL.md DELETED
@@ -1,293 +0,0 @@
1
- # neural-style-pt Installation
2
-
3
- This guide will walk you through multiple ways to setup `neural-style-pt` on Ubuntu and Windows. If you wish to install PyTorch and neural-style-pt on a different operating system like MacOS, installation guides can be found [here](https://pytorch.org).
4
-
5
- Note that in order to reduce their size, the pre-packaged binary releases (pip, Conda, etc...) have removed support for some older GPUs, and thus you will have to install from source in order to use these GPUs.
6
-
7
-
8
- # Ubuntu:
9
-
10
- ## With A Package Manager:
11
-
12
- The pip and Conda packages ship with CUDA and cuDNN already built in, so after you have installed PyTorch with pip or Conda, you can skip to [installing neural-style-pt](https://github.com/ProGamerGov/neural-style-pt/blob/master/INSTALL.md#install-neural-style-pt).
13
-
14
- ### pip:
15
-
16
- The neural-style-pt PyPI page can be found here: https://pypi.org/project/neural-style/
17
-
18
- If you wish to install neural-style-pt as a pip package, then use the following command:
19
-
20
- ```
21
- # in a terminal, run the command
22
- pip install neural-style
23
- ```
24
-
25
- Or:
26
-
27
-
28
- ```
29
- # in a terminal, run the command
30
- pip3 install neural-style
31
- ```
32
-
33
- Next download the models with:
34
-
35
-
36
- ```
37
- neural-style -download_models
38
- ```
39
-
40
- By default the models are downloaded to your home directory, but you can specify a download location with:
41
-
42
- ```
43
- neural-style -download_models <download_path>
44
- ```
45
-
46
- #### Github and pip:
47
-
48
- Following the pip installation instructions
49
- [here](http://pytorch.org), you can install PyTorch with the following commands:
50
-
51
- ```
52
- # in a terminal, run the commands
53
- cd ~/
54
- pip install torch torchvision
55
- ```
56
-
57
- Or:
58
-
59
- ```
60
- cd ~/
61
- pip3 install torch torchvision
62
- ```
63
-
64
- Now continue on to [installing neural-style-pt](https://github.com/ProGamerGov/neural-style-pt/blob/master/INSTALL.md#install-neural-style-pt) to install neural-style-pt.
65
-
66
- ### Conda:
67
-
68
- Following the Conda installation instructions
69
- [here](http://pytorch.org), you can install PyTorch with the following command:
70
-
71
- ```
72
- conda install pytorch torchvision -c pytorch
73
- ```
74
-
75
- Now continue on to [installing neural-style-pt](https://github.com/ProGamerGov/neural-style-pt/blob/master/INSTALL.md#install-neural-style-pt) to install neural-style-pt.
76
-
77
- ## From Source:
78
-
79
- ### (Optional) Step 1: Install CUDA
80
-
81
- If you have a [CUDA-capable GPU from NVIDIA](https://developer.nvidia.com/cuda-gpus) then you can
82
- speed up `neural-style-pt` with CUDA.
83
-
84
- First download and unpack the local CUDA installer from NVIDIA; note that there are different
85
- installers for each recent version of Ubuntu:
86
-
87
- ```
88
- # For Ubuntu 18.04
89
- sudo dpkg -i cuda-repo-ubuntu1804-10-1-local-10.1.243-418.87.00_1.0-1_amd64.deb
90
- sudo apt-key add /var/cuda-repo-<version>/7fa2af80.pub
91
- ```
92
-
93
- ```
94
- # For Ubuntu 16.04
95
- sudo dpkg -i cuda-repo-ubuntu1604-10-1-local-10.1.243-418.87.00_1.0-1_amd64.deb
96
- sudo apt-key add /var/cuda-repo-<version>/7fa2af80.pub
97
- ```
98
-
99
- Instructions for downloading and installing the latest CUDA version on all supported operating systems, can be found [here](https://developer.nvidia.com/cuda-downloads).
100
-
101
- Now update the repository cache and install CUDA. Note that this will also install a graphics driver from NVIDIA.
102
-
103
- ```
104
- sudo apt-get update
105
- sudo apt-get install cuda
106
- ```
107
-
108
- At this point you may need to reboot your machine to load the new graphics driver.
109
- After rebooting, you should be able to see the status of your graphics card(s) by running
110
- the command `nvidia-smi`; it should give output that looks something like this:
111
-
112
- ```
113
- Wed Apr 11 21:54:49 2018
114
- +-----------------------------------------------------------------------------+
115
- | NVIDIA-SMI 384.90 Driver Version: 384.90 |
116
- |-------------------------------+----------------------+----------------------+
117
- | GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
118
- | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
119
- |===============================+======================+======================|
120
- | 0 Tesla K80 Off | 00000000:00:1E.0 Off | 0 |
121
- | N/A 62C P0 68W / 149W | 0MiB / 11439MiB | 94% Default |
122
- +-------------------------------+----------------------+----------------------+
123
-
124
- +-----------------------------------------------------------------------------+
125
- | Processes: GPU Memory |
126
- | GPU PID Type Process name Usage |
127
- |=============================================================================|
128
- | No running processes found |
129
- +-----------------------------------------------------------------------------+
130
- ```
131
-
132
- ### (Optional) Step 2: Install cuDNN
133
-
134
- cuDNN is a library from NVIDIA that efficiently implements many of the operations (like convolutions and pooling)
135
- that are commonly used in deep learning.
136
-
137
- After registering as a developer with NVIDIA, you can [download cuDNN here](https://developer.nvidia.com/cudnn). Make sure that you use the appropriate version of cuDNN for your version of CUDA.
-
- After downloading, you can unpack and install cuDNN like this:
140
-
141
- ```
142
- tar -zxvf cudnn-10.1-linux-x64-v7.5.0.56.tgz
143
- sudo cp cuda/lib64/libcudnn* /usr/local/cuda/lib64
144
- sudo cp cuda/include/cudnn.h /usr/local/cuda/include
145
- ```
146
-
147
- Note that the cuDNN backend can only be used for GPU mode.
148
-
149
- ### (Optional) Steps 1-3: Install PyTorch with support for AMD GPUs using Radeon Open Compute Stack (ROCm)
150
-
151
-
152
- It is recommended that if you wish to use PyTorch with an AMD GPU, you install it via the official ROCm dockerfile:
153
- https://rocm.github.io/pytorch.html
154
-
155
- - Supported AMD GPUs for the dockerfile are: Vega10 / gfx900 generation discrete graphics cards (Vega56, Vega64, or MI25).
156
-
157
- PyTorch does not officially provide support for compilation on the host with AMD GPUs, but [a user guide posted here](https://github.com/ROCmSoftwarePlatform/pytorch/issues/337#issuecomment-467220107) apparently works well.
158
-
159
- ROCm utilizes a CUDA porting tool called HIP, which automatically converts CUDA code into HIP code. HIP code can run on both AMD and Nvidia GPUs.
160
-
161
-
162
- ### Step 3: Install PyTorch
163
-
164
- To install PyTorch [from source](https://github.com/pytorch/pytorch#from-source) on Ubuntu (Instructions may be different if you are using a different OS):
165
-
166
- ```
167
- cd ~/
168
- git clone --recursive https://github.com/pytorch/pytorch
169
- cd pytorch
170
- python setup.py install
171
-
172
- cd ~/
173
- git clone --recursive https://github.com/pytorch/vision
174
- cd vision
175
- python setup.py install
176
- ```
177
-
178
- To check that your PyTorch installation is working, run the command `python` or `python3` to enter the Python interpreter, then type `import torch` and hit enter.
-
- You can then type `print(torch.version.cuda)` and `print(torch.backends.cudnn.version())` to confirm that you are using the desired versions of CUDA and cuDNN.
-
- To quit, just type `exit()` or use Ctrl-D.
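-
- For example, the same check can be run as a short script (a minimal sketch; the `torch.cuda.is_available()` line is an extra sanity check not mentioned above):
-
- ```
- import torch
- print(torch.__version__)               # installed PyTorch version
- print(torch.version.cuda)              # CUDA version PyTorch was built against (None for CPU-only builds)
- print(torch.backends.cudnn.version())  # cuDNN version, if available
- print(torch.cuda.is_available())       # True if a CUDA device can be used
- ```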
-
184
- Now continue on to [installing neural-style-pt](https://github.com/ProGamerGov/neural-style-pt/blob/master/INSTALL.md#install-neural-style-pt) to install neural-style-pt.
185
-
186
-
187
- # Windows Installation
188
-
189
- If you wish to install PyTorch on Windows From Source or via Conda, you can find instructions on the PyTorch website: https://pytorch.org/
190
-
191
-
192
- ### Github and pip
193
-
194
- First, you will need to download Python 3 and install it: https://www.python.org/downloads/windows/. I recommend using the executable installer for the latest version of Python 3.
195
-
196
- Then using https://pytorch.org/, get the correct pip command, paste it into the Command Prompt (CMD) and hit enter:
197
-
198
-
199
- ```
200
- pip3 install torch===1.3.0 torchvision===0.4.1 -f https://download.pytorch.org/whl/torch_stable.html
201
- ```
202
-
203
-
204
- After installing PyTorch, download the neural-style-pt Github respository and extract/unzip it to the desired location.
205
-
206
- Then copy the file path to your neural-style-pt folder, and paste it into the Command Prompt, with `cd` in front of it and then hit enter.
207
-
208
- In the example below, the neural-style-pt folder was placed on the desktop:
209
-
210
- ```
211
- cd C:\Users\<User_Name>\Desktop\neural-style-pt-master
212
- ```
213
-
214
- You can now continue on to [installing neural-style-pt](https://github.com/ProGamerGov/neural-style-pt/blob/master/INSTALL.md#install-neural-style-pt), skipping the `git clone` step.
215
-
216
- # Install neural-style-pt
217
-
218
- First we clone `neural-style-pt` from GitHub:
219
-
220
- ```
221
- cd ~/
222
- git clone https://github.com/ProGamerGov/neural-style-pt.git
223
- cd neural-style-pt
224
- ```
225
-
226
- Next we need to download the pretrained neural network models:
227
-
228
- ```
229
- python models/download_models.py
230
- ```
231
-
232
- You should now be able to run `neural-style-pt` in CPU mode like this:
233
-
234
- ```
235
- python neural_style.py -gpu c -print_iter 1
236
- ```
237
-
238
- If you installed PyTorch with support for CUDA, then you should now be able to run `neural-style-pt` in GPU mode like this:
239
-
240
- ```
241
- python neural_style.py -gpu 0 -print_iter 1
242
- ```
243
-
244
- If you installed PyTorch with support for cuDNN, then you should now be able to run `neural-style-pt` with the `cudnn` backend like this:
245
-
246
- ```
247
- python neural_style.py -gpu 0 -backend cudnn -print_iter 1
248
- ```
249
-
250
- If everything is working properly you should see output like this:
251
-
252
- ```
253
- Iteration 1 / 1000
254
- Content 1 loss: 1616196.125
255
- Style 1 loss: 29890.9980469
256
- Style 2 loss: 658038.625
257
- Style 3 loss: 145283.671875
258
- Style 4 loss: 11347409.0
259
- Style 5 loss: 563.368896484
260
- Total loss: 13797382.0
261
- Iteration 2 / 1000
262
- Content 1 loss: 1616195.625
263
- Style 1 loss: 29890.9980469
264
- Style 2 loss: 658038.625
265
- Style 3 loss: 145283.671875
266
- Style 4 loss: 11347409.0
267
- Style 5 loss: 563.368896484
268
- Total loss: 13797382.0
269
- Iteration 3 / 1000
270
- Content 1 loss: 1579918.25
271
- Style 1 loss: 29881.3164062
272
- Style 2 loss: 654351.75
273
- Style 3 loss: 144214.640625
274
- Style 4 loss: 11301945.0
275
- Style 5 loss: 562.733032227
276
- Total loss: 13711628.0
277
- Iteration 4 / 1000
278
- Content 1 loss: 1460443.0
279
- Style 1 loss: 29849.7226562
280
- Style 2 loss: 643799.1875
281
- Style 3 loss: 140405.015625
282
- Style 4 loss: 10940431.0
283
- Style 5 loss: 553.507446289
284
- Total loss: 13217080.0
285
- Iteration 5 / 1000
286
- Content 1 loss: 1298983.625
287
- Style 1 loss: 29734.8964844
288
- Style 2 loss: 604133.8125
289
- Style 3 loss: 125455.945312
290
- Style 4 loss: 8850759.0
291
- Style 5 loss: 526.118591309
292
- Total loss: 10912633.0
293
- ```
 
spaces/Aki004/herta-so-vits/vdecoder/nsf_hifigan/nvSTFT.py DELETED
@@ -1,134 +0,0 @@
1
- import math
2
- import os
3
- os.environ["LRU_CACHE_CAPACITY"] = "3"
4
- import random
5
- import torch
6
- import torch.utils.data
7
- import numpy as np
8
- import librosa
9
- from librosa.util import normalize
10
- from librosa.filters import mel as librosa_mel_fn
11
- from scipy.io.wavfile import read
12
- import soundfile as sf
13
- import torch.nn.functional as F
14
-
15
- def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False):
16
- sampling_rate = None
17
- try:
18
- data, sampling_rate = sf.read(full_path, always_2d=True)
19
- except Exception as ex:
20
- print(f"'{full_path}' failed to load.\nException:")
21
- print(ex)
22
- if return_empty_on_exception:
23
- return [], sampling_rate or target_sr or 48000
24
- else:
25
- raise Exception(ex)
26
-
27
- if len(data.shape) > 1:
28
- data = data[:, 0]
29
- assert len(data) > 2# check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension)
30
-
31
- if np.issubdtype(data.dtype, np.integer): # if audio data is type int
32
- max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX
33
- else: # if audio data is type fp32
34
- max_mag = max(np.amax(data), -np.amin(data))
35
- max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32
36
-
37
- data = torch.FloatTensor(data.astype(np.float32))/max_mag
38
-
39
- if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. return_empty_on_exception will return empty arr instead of except
40
- return [], sampling_rate or target_sr or 48000
41
- if target_sr is not None and sampling_rate != target_sr:
42
- data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr))
43
- sampling_rate = target_sr
44
-
45
- return data, sampling_rate
46
-
47
- def dynamic_range_compression(x, C=1, clip_val=1e-5):
48
- return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
49
-
50
- def dynamic_range_decompression(x, C=1):
51
- return np.exp(x) / C
52
-
53
- def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
54
- return torch.log(torch.clamp(x, min=clip_val) * C)
55
-
56
- def dynamic_range_decompression_torch(x, C=1):
57
- return torch.exp(x) / C
58
-
59
- class STFT():
60
- def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5):
61
- self.target_sr = sr
62
-
63
- self.n_mels = n_mels
64
- self.n_fft = n_fft
65
- self.win_size = win_size
66
- self.hop_length = hop_length
67
- self.fmin = fmin
68
- self.fmax = fmax
69
- self.clip_val = clip_val
70
- self.mel_basis = {}
71
- self.hann_window = {}
72
-
73
- def get_mel(self, y, keyshift=0, speed=1, center=False):
74
- sampling_rate = self.target_sr
75
- n_mels = self.n_mels
76
- n_fft = self.n_fft
77
- win_size = self.win_size
78
- hop_length = self.hop_length
79
- fmin = self.fmin
80
- fmax = self.fmax
81
- clip_val = self.clip_val
82
-
83
- factor = 2 ** (keyshift / 12)
84
- n_fft_new = int(np.round(n_fft * factor))
85
- win_size_new = int(np.round(win_size * factor))
86
- hop_length_new = int(np.round(hop_length * speed))
87
-
88
- if torch.min(y) < -1.:
89
- print('min value is ', torch.min(y))
90
- if torch.max(y) > 1.:
91
- print('max value is ', torch.max(y))
92
-
93
- mel_basis_key = str(fmax)+'_'+str(y.device)
94
- if mel_basis_key not in self.mel_basis:
95
- mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
96
- self.mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)
97
-
98
- keyshift_key = str(keyshift)+'_'+str(y.device)
99
- if keyshift_key not in self.hann_window:
100
- self.hann_window[keyshift_key] = torch.hann_window(win_size_new).to(y.device)
101
-
102
- pad_left = (win_size_new - hop_length_new) //2
103
- pad_right = max((win_size_new- hop_length_new + 1) //2, win_size_new - y.size(-1) - pad_left)
104
- if pad_right < y.size(-1):
105
- mode = 'reflect'
106
- else:
107
- mode = 'constant'
108
- y = torch.nn.functional.pad(y.unsqueeze(1), (pad_left, pad_right), mode = mode)
109
- y = y.squeeze(1)
110
-
111
- spec = torch.stft(y, n_fft_new, hop_length=hop_length_new, win_length=win_size_new, window=self.hann_window[keyshift_key],
112
- center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
113
- # print(111,spec)
114
- spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
115
- if keyshift != 0:
116
- size = n_fft // 2 + 1
117
- resize = spec.size(1)
118
- if resize < size:
119
- spec = F.pad(spec, (0, 0, 0, size-resize))
120
- spec = spec[:, :size, :] * win_size / win_size_new
121
-
122
- # print(222,spec)
123
- spec = torch.matmul(self.mel_basis[mel_basis_key], spec)
124
- # print(333,spec)
125
- spec = dynamic_range_compression_torch(spec, clip_val=clip_val)
126
- # print(444,spec)
127
- return spec
128
-
129
- def __call__(self, audiopath):
130
- audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr)
131
- spect = self.get_mel(audio.unsqueeze(0)).squeeze(0)
132
- return spect
133
-
134
- stft = STFT()
 
spaces/AlexN/pull_up/TractionModel.py DELETED
@@ -1,59 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """
3
- Created on Sun Jul 4 15:07:27 2021
4
-
5
- @author: AlexandreN
6
- """
7
- from __future__ import print_function, division
8
-
9
- import torch
10
- import torch.nn as nn
11
- import torchvision
12
-
13
-
14
- class SingleTractionHead(nn.Module):
15
-
16
- def __init__(self):
17
- super(SingleTractionHead, self).__init__()
18
-
19
- self.head_locs = nn.Sequential(nn.Linear(2048, 1024),
20
- nn.ReLU(),
21
- nn.Dropout(p=0.3),
22
- nn.Linear(1024, 4),
23
- nn.Sigmoid()
24
- )
25
-
26
- # Head class should output the logits over the classe
27
- self.head_class = nn.Sequential(nn.Linear(2048, 128),
28
- nn.ReLU(),
29
- nn.Dropout(p=0.3),
30
- nn.Linear(128, 1))
31
-
32
- def forward(self, features):
33
- features = features.view(features.size()[0], -1)
34
-
35
- y_bbox = self.head_locs(features)
36
- y_class = self.head_class(features)
37
-
38
- res = (y_bbox, y_class)
39
- return res
40
-
41
-
42
- def create_model():
43
- # setup the architecture of the model
44
- feature_extractor = torchvision.models.resnet50(pretrained=True)
45
- model_body = nn.Sequential(*list(feature_extractor.children())[:-1])
46
- for param in model_body.parameters():
47
- param.requires_grad = False
48
- # Parameters of newly constructed modules have requires_grad=True by default
49
- # num_ftrs = model_body.fc.in_features
50
-
51
- model_head = SingleTractionHead()
52
- model = nn.Sequential(model_body, model_head)
53
- return model
54
-
55
-
56
- def load_weights(model, path='model.pt', device_='cpu'):
57
- checkpoint = torch.load(path, map_location=torch.device(device_))
58
- model.load_state_dict(checkpoint)
59
- return model
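-
- # Usage sketch (editorial note, not part of the original file): the head consumes
- # pooled ResNet-50 features, so a plain image batch can be fed end to end.
- # The 224x224 input size below is an illustrative assumption; torch is already
- # imported at the top of this module.
- #
- # model = load_weights(create_model(), path='model.pt', device_='cpu')
- # model.eval()
- # with torch.no_grad():
- #     bbox, logit = model(torch.randn(1, 3, 224, 224))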
 
spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/korean.py DELETED
@@ -1,210 +0,0 @@
1
- import re
2
- from jamo import h2j, j2hcj
3
- import ko_pron
4
-
5
-
6
- # This is a list of Korean classifiers preceded by pure Korean numerals.
7
- _korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
8
-
9
- # List of (hangul, hangul divided) pairs:
10
- _hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
11
- ('ㄳ', 'ㄱㅅ'),
12
- ('ㄵ', 'ㄴㅈ'),
13
- ('ㄶ', 'ㄴㅎ'),
14
- ('ㄺ', 'ㄹㄱ'),
15
- ('ㄻ', 'ㄹㅁ'),
16
- ('ㄼ', 'ㄹㅂ'),
17
- ('ㄽ', 'ㄹㅅ'),
18
- ('ㄾ', 'ㄹㅌ'),
19
- ('ㄿ', 'ㄹㅍ'),
20
- ('ㅀ', 'ㄹㅎ'),
21
- ('ㅄ', 'ㅂㅅ'),
22
- ('ㅘ', 'ㅗㅏ'),
23
- ('ㅙ', 'ㅗㅐ'),
24
- ('ㅚ', 'ㅗㅣ'),
25
- ('ㅝ', 'ㅜㅓ'),
26
- ('ㅞ', 'ㅜㅔ'),
27
- ('ㅟ', 'ㅜㅣ'),
28
- ('ㅢ', 'ㅡㅣ'),
29
- ('ㅑ', 'ㅣㅏ'),
30
- ('ㅒ', 'ㅣㅐ'),
31
- ('ㅕ', 'ㅣㅓ'),
32
- ('ㅖ', 'ㅣㅔ'),
33
- ('ㅛ', 'ㅣㅗ'),
34
- ('ㅠ', 'ㅣㅜ')
35
- ]]
36
-
37
- # List of (Latin alphabet, hangul) pairs:
38
- _latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
39
- ('a', '에이'),
40
- ('b', '비'),
41
- ('c', '시'),
42
- ('d', '디'),
43
- ('e', '이'),
44
- ('f', '에프'),
45
- ('g', '지'),
46
- ('h', '에이치'),
47
- ('i', '아이'),
48
- ('j', '제이'),
49
- ('k', '케이'),
50
- ('l', '엘'),
51
- ('m', '엠'),
52
- ('n', '엔'),
53
- ('o', '오'),
54
- ('p', '피'),
55
- ('q', '큐'),
56
- ('r', '아르'),
57
- ('s', '에스'),
58
- ('t', '티'),
59
- ('u', '유'),
60
- ('v', '브이'),
61
- ('w', '더블유'),
62
- ('x', '엑스'),
63
- ('y', '와이'),
64
- ('z', '제트')
65
- ]]
66
-
67
- # List of (ipa, lazy ipa) pairs:
68
- _ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
69
- ('t͡ɕ','ʧ'),
70
- ('d͡ʑ','ʥ'),
71
- ('ɲ','n^'),
72
- ('ɕ','ʃ'),
73
- ('ʷ','w'),
74
- ('ɭ','l`'),
75
- ('ʎ','ɾ'),
76
- ('ɣ','ŋ'),
77
- ('ɰ','ɯ'),
78
- ('ʝ','j'),
79
- ('ʌ','ə'),
80
- ('ɡ','g'),
81
- ('\u031a','#'),
82
- ('\u0348','='),
83
- ('\u031e',''),
84
- ('\u0320',''),
85
- ('\u0339','')
86
- ]]
87
-
88
-
89
- def latin_to_hangul(text):
90
- for regex, replacement in _latin_to_hangul:
91
- text = re.sub(regex, replacement, text)
92
- return text
93
-
94
-
95
- def divide_hangul(text):
96
- text = j2hcj(h2j(text))
97
- for regex, replacement in _hangul_divided:
98
- text = re.sub(regex, replacement, text)
99
- return text
100
-
101
-
102
- def hangul_number(num, sino=True):
103
- '''Reference https://github.com/Kyubyong/g2pK'''
104
- num = re.sub(',', '', num)
105
-
106
- if num == '0':
107
- return '영'
108
- if not sino and num == '20':
109
- return '스무'
110
-
111
- digits = '123456789'
112
- names = '일이삼사오육칠팔구'
113
- digit2name = {d: n for d, n in zip(digits, names)}
114
-
115
- modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
116
- decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
117
- digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
118
- digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
119
-
120
- spelledout = []
121
- for i, digit in enumerate(num):
122
- i = len(num) - i - 1
123
- if sino:
124
- if i == 0:
125
- name = digit2name.get(digit, '')
126
- elif i == 1:
127
- name = digit2name.get(digit, '') + '십'
128
- name = name.replace('일십', '십')
129
- else:
130
- if i == 0:
131
- name = digit2mod.get(digit, '')
132
- elif i == 1:
133
- name = digit2dec.get(digit, '')
134
- if digit == '0':
135
- if i % 4 == 0:
136
- last_three = spelledout[-min(3, len(spelledout)):]
137
- if ''.join(last_three) == '':
138
- spelledout.append('')
139
- continue
140
- else:
141
- spelledout.append('')
142
- continue
143
- if i == 2:
144
- name = digit2name.get(digit, '') + '백'
145
- name = name.replace('일백', '백')
146
- elif i == 3:
147
- name = digit2name.get(digit, '') + '천'
148
- name = name.replace('일천', '천')
149
- elif i == 4:
150
- name = digit2name.get(digit, '') + '만'
151
- name = name.replace('일만', '만')
152
- elif i == 5:
153
- name = digit2name.get(digit, '') + '십'
154
- name = name.replace('일십', '십')
155
- elif i == 6:
156
- name = digit2name.get(digit, '') + '백'
157
- name = name.replace('일백', '백')
158
- elif i == 7:
159
- name = digit2name.get(digit, '') + '천'
160
- name = name.replace('일천', '천')
161
- elif i == 8:
162
- name = digit2name.get(digit, '') + '억'
163
- elif i == 9:
164
- name = digit2name.get(digit, '') + '십'
165
- elif i == 10:
166
- name = digit2name.get(digit, '') + '백'
167
- elif i == 11:
168
- name = digit2name.get(digit, '') + '천'
169
- elif i == 12:
170
- name = digit2name.get(digit, '') + '조'
171
- elif i == 13:
172
- name = digit2name.get(digit, '') + '십'
173
- elif i == 14:
174
- name = digit2name.get(digit, '') + '백'
175
- elif i == 15:
176
- name = digit2name.get(digit, '') + '천'
177
- spelledout.append(name)
178
- return ''.join(elem for elem in spelledout)
179
-
180
-
181
- def number_to_hangul(text):
182
- '''Reference https://github.com/Kyubyong/g2pK'''
183
- tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
184
- for token in tokens:
185
- num, classifier = token
186
- if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
187
- spelledout = hangul_number(num, sino=False)
188
- else:
189
- spelledout = hangul_number(num, sino=True)
190
- text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
191
- # digit by digit for remaining digits
192
- digits = '0123456789'
193
- names = '영일이삼사오육칠팔구'
194
- for d, n in zip(digits, names):
195
- text = text.replace(d, n)
196
- return text
197
-
198
-
199
- def korean_to_lazy_ipa(text):
200
- text = latin_to_hangul(text)
201
- text = number_to_hangul(text)
202
- text=re.sub('[\uac00-\ud7af]+',lambda x:ko_pron.romanise(x.group(0),'ipa').split('] ~ [')[0],text)
203
- for regex, replacement in _ipa_to_lazy_ipa:
204
- text = re.sub(regex, replacement, text)
205
- return text
206
-
207
-
208
- def korean_to_ipa(text):
209
- text = korean_to_lazy_ipa(text)
210
- return text.replace('ʧ','tʃ').replace('ʥ','dʑ')
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/controlnet.py DELETED
@@ -1,822 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- from dataclasses import dataclass
15
- from typing import Any, Dict, List, Optional, Tuple, Union
16
-
17
- import torch
18
- from torch import nn
19
- from torch.nn import functional as F
20
-
21
- from ..configuration_utils import ConfigMixin, register_to_config
22
- from ..loaders import FromOriginalControlnetMixin
23
- from ..utils import BaseOutput, logging
24
- from .attention_processor import AttentionProcessor, AttnProcessor
25
- from .embeddings import TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps
26
- from .modeling_utils import ModelMixin
27
- from .unet_2d_blocks import (
28
- CrossAttnDownBlock2D,
29
- DownBlock2D,
30
- UNetMidBlock2DCrossAttn,
31
- get_down_block,
32
- )
33
- from .unet_2d_condition import UNet2DConditionModel
34
-
35
-
36
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
37
-
38
-
39
- @dataclass
40
- class ControlNetOutput(BaseOutput):
41
- """
42
- The output of [`ControlNetModel`].
43
-
44
- Args:
45
- down_block_res_samples (`tuple[torch.Tensor]`):
46
- A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should
47
- be of shape `(batch_size, channel * resolution, height //resolution, width // resolution)`. Output can be
48
- used to condition the original UNet's downsampling activations.
49
- mid_block_res_sample (`torch.Tensor`):
- The activation of the middle block (the lowest sample resolution). Each tensor should be of shape
51
- `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`.
52
- Output can be used to condition the original UNet's middle block activation.
53
- """
54
-
55
- down_block_res_samples: Tuple[torch.Tensor]
56
- mid_block_res_sample: torch.Tensor
57
-
58
-
59
- class ControlNetConditioningEmbedding(nn.Module):
60
- """
61
- Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
62
- [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
63
- training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
64
- convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
65
- (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
66
- model) to encode image-space conditions ... into feature maps ..."
67
- """
68
-
69
- def __init__(
70
- self,
71
- conditioning_embedding_channels: int,
72
- conditioning_channels: int = 3,
73
- block_out_channels: Tuple[int] = (16, 32, 96, 256),
74
- ):
75
- super().__init__()
76
-
77
- self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
78
-
79
- self.blocks = nn.ModuleList([])
80
-
81
- for i in range(len(block_out_channels) - 1):
82
- channel_in = block_out_channels[i]
83
- channel_out = block_out_channels[i + 1]
84
- self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
85
- self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
86
-
87
- self.conv_out = zero_module(
88
- nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
89
- )
90
-
91
- def forward(self, conditioning):
92
- embedding = self.conv_in(conditioning)
93
- embedding = F.silu(embedding)
94
-
95
- for block in self.blocks:
96
- embedding = block(embedding)
97
- embedding = F.silu(embedding)
98
-
99
- embedding = self.conv_out(embedding)
100
-
101
- return embedding
102
-
103
-
104
- class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin):
105
- """
106
- A ControlNet model.
107
-
108
- Args:
109
- in_channels (`int`, defaults to 4):
110
- The number of channels in the input sample.
111
- flip_sin_to_cos (`bool`, defaults to `True`):
112
- Whether to flip the sin to cos in the time embedding.
113
- freq_shift (`int`, defaults to 0):
114
- The frequency shift to apply to the time embedding.
115
- down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
116
- The tuple of downsample blocks to use.
117
- only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`):
118
- block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`):
119
- The tuple of output channels for each block.
120
- layers_per_block (`int`, defaults to 2):
121
- The number of layers per block.
122
- downsample_padding (`int`, defaults to 1):
123
- The padding to use for the downsampling convolution.
124
- mid_block_scale_factor (`float`, defaults to 1):
125
- The scale factor to use for the mid block.
126
- act_fn (`str`, defaults to "silu"):
127
- The activation function to use.
128
- norm_num_groups (`int`, *optional*, defaults to 32):
129
- The number of groups to use for the normalization. If None, normalization and activation layers are skipped
- in post-processing.
131
- norm_eps (`float`, defaults to 1e-5):
132
- The epsilon to use for the normalization.
133
- cross_attention_dim (`int`, defaults to 1280):
134
- The dimension of the cross attention features.
135
- transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
136
- The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
137
- [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
138
- [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
139
- encoder_hid_dim (`int`, *optional*, defaults to None):
140
- If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
141
- dimension to `cross_attention_dim`.
142
- encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
143
- If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
144
- embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`.
145
- attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8):
146
- The dimension of the attention heads.
147
- use_linear_projection (`bool`, defaults to `False`):
148
- class_embed_type (`str`, *optional*, defaults to `None`):
149
- The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None,
150
- `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
151
- addition_embed_type (`str`, *optional*, defaults to `None`):
152
- Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
153
- "text". "text" will use the `TextTimeEmbedding` layer.
154
- num_class_embeds (`int`, *optional*, defaults to 0):
155
- Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
156
- class conditioning with `class_embed_type` equal to `None`.
157
- upcast_attention (`bool`, defaults to `False`):
158
- resnet_time_scale_shift (`str`, defaults to `"default"`):
159
- Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`.
160
- projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`):
161
- The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when
162
- `class_embed_type="projection"`.
163
- controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
164
- The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
165
- conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`):
166
- The tuple of output channel for each block in the `conditioning_embedding` layer.
167
- global_pool_conditions (`bool`, defaults to `False`):
168
- """
169
-
170
- _supports_gradient_checkpointing = True
171
-
172
- @register_to_config
173
- def __init__(
174
- self,
175
- in_channels: int = 4,
176
- conditioning_channels: int = 3,
177
- flip_sin_to_cos: bool = True,
178
- freq_shift: int = 0,
179
- down_block_types: Tuple[str] = (
180
- "CrossAttnDownBlock2D",
181
- "CrossAttnDownBlock2D",
182
- "CrossAttnDownBlock2D",
183
- "DownBlock2D",
184
- ),
185
- only_cross_attention: Union[bool, Tuple[bool]] = False,
186
- block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
187
- layers_per_block: int = 2,
188
- downsample_padding: int = 1,
189
- mid_block_scale_factor: float = 1,
190
- act_fn: str = "silu",
191
- norm_num_groups: Optional[int] = 32,
192
- norm_eps: float = 1e-5,
193
- cross_attention_dim: int = 1280,
194
- transformer_layers_per_block: Union[int, Tuple[int]] = 1,
195
- encoder_hid_dim: Optional[int] = None,
196
- encoder_hid_dim_type: Optional[str] = None,
197
- attention_head_dim: Union[int, Tuple[int]] = 8,
198
- num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
199
- use_linear_projection: bool = False,
200
- class_embed_type: Optional[str] = None,
201
- addition_embed_type: Optional[str] = None,
202
- addition_time_embed_dim: Optional[int] = None,
203
- num_class_embeds: Optional[int] = None,
204
- upcast_attention: bool = False,
205
- resnet_time_scale_shift: str = "default",
206
- projection_class_embeddings_input_dim: Optional[int] = None,
207
- controlnet_conditioning_channel_order: str = "rgb",
208
- conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
209
- global_pool_conditions: bool = False,
210
- addition_embed_type_num_heads=64,
211
- ):
212
- super().__init__()
213
-
214
- # If `num_attention_heads` is not defined (which is the case for most models)
215
- # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
216
- # The reason for this behavior is to correct for incorrectly named variables that were introduced
217
- # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
218
- # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
219
- # which is why we correct for the naming here.
220
- num_attention_heads = num_attention_heads or attention_head_dim
221
-
222
- # Check inputs
223
- if len(block_out_channels) != len(down_block_types):
224
- raise ValueError(
225
- f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
226
- )
227
-
228
- if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
229
- raise ValueError(
230
- f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
231
- )
232
-
233
- if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
234
- raise ValueError(
235
- f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
236
- )
237
-
238
- if isinstance(transformer_layers_per_block, int):
239
- transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
240
-
241
- # input
242
- conv_in_kernel = 3
243
- conv_in_padding = (conv_in_kernel - 1) // 2
244
- self.conv_in = nn.Conv2d(
245
- in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
246
- )
247
-
248
- # time
249
- time_embed_dim = block_out_channels[0] * 4
250
- self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
251
- timestep_input_dim = block_out_channels[0]
252
- self.time_embedding = TimestepEmbedding(
253
- timestep_input_dim,
254
- time_embed_dim,
255
- act_fn=act_fn,
256
- )
257
-
258
- if encoder_hid_dim_type is None and encoder_hid_dim is not None:
259
- encoder_hid_dim_type = "text_proj"
260
- self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
261
- logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
262
-
263
- if encoder_hid_dim is None and encoder_hid_dim_type is not None:
264
- raise ValueError(
265
- f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
266
- )
267
-
268
- if encoder_hid_dim_type == "text_proj":
269
- self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
270
- elif encoder_hid_dim_type == "text_image_proj":
271
- # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
272
- # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
273
- # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)
274
- self.encoder_hid_proj = TextImageProjection(
275
- text_embed_dim=encoder_hid_dim,
276
- image_embed_dim=cross_attention_dim,
277
- cross_attention_dim=cross_attention_dim,
278
- )
279
-
280
- elif encoder_hid_dim_type is not None:
281
- raise ValueError(
282
- f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
283
- )
284
- else:
285
- self.encoder_hid_proj = None
286
-
287
- # class embedding
288
- if class_embed_type is None and num_class_embeds is not None:
289
- self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
290
- elif class_embed_type == "timestep":
291
- self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
292
- elif class_embed_type == "identity":
293
- self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
294
- elif class_embed_type == "projection":
295
- if projection_class_embeddings_input_dim is None:
296
- raise ValueError(
297
- "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
298
- )
299
- # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
300
- # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
301
- # 2. it projects from an arbitrary input dimension.
302
- #
303
- # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
304
- # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
305
- # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
306
- self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
307
- else:
308
- self.class_embedding = None
309
-
310
- if addition_embed_type == "text":
311
- if encoder_hid_dim is not None:
312
- text_time_embedding_from_dim = encoder_hid_dim
313
- else:
314
- text_time_embedding_from_dim = cross_attention_dim
315
-
316
- self.add_embedding = TextTimeEmbedding(
317
- text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
318
- )
319
- elif addition_embed_type == "text_image":
320
- # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
321
- # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
322
- # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)
323
- self.add_embedding = TextImageTimeEmbedding(
324
- text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
325
- )
326
- elif addition_embed_type == "text_time":
327
- self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
328
- self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
329
-
330
- elif addition_embed_type is not None:
331
- raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
332
-
333
- # control net conditioning embedding
334
- self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
335
- conditioning_embedding_channels=block_out_channels[0],
336
- block_out_channels=conditioning_embedding_out_channels,
337
- conditioning_channels=conditioning_channels,
338
- )
339
-
340
- self.down_blocks = nn.ModuleList([])
341
- self.controlnet_down_blocks = nn.ModuleList([])
342
-
343
- if isinstance(only_cross_attention, bool):
344
- only_cross_attention = [only_cross_attention] * len(down_block_types)
345
-
346
- if isinstance(attention_head_dim, int):
347
- attention_head_dim = (attention_head_dim,) * len(down_block_types)
348
-
349
- if isinstance(num_attention_heads, int):
350
- num_attention_heads = (num_attention_heads,) * len(down_block_types)
351
-
352
- # down
353
- output_channel = block_out_channels[0]
354
-
355
- controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
356
- controlnet_block = zero_module(controlnet_block)
357
- self.controlnet_down_blocks.append(controlnet_block)
358
-
359
- for i, down_block_type in enumerate(down_block_types):
360
- input_channel = output_channel
361
- output_channel = block_out_channels[i]
362
- is_final_block = i == len(block_out_channels) - 1
363
-
364
- down_block = get_down_block(
365
- down_block_type,
366
- num_layers=layers_per_block,
367
- transformer_layers_per_block=transformer_layers_per_block[i],
368
- in_channels=input_channel,
369
- out_channels=output_channel,
370
- temb_channels=time_embed_dim,
371
- add_downsample=not is_final_block,
372
- resnet_eps=norm_eps,
373
- resnet_act_fn=act_fn,
374
- resnet_groups=norm_num_groups,
375
- cross_attention_dim=cross_attention_dim,
376
- num_attention_heads=num_attention_heads[i],
377
- attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
378
- downsample_padding=downsample_padding,
379
- use_linear_projection=use_linear_projection,
380
- only_cross_attention=only_cross_attention[i],
381
- upcast_attention=upcast_attention,
382
- resnet_time_scale_shift=resnet_time_scale_shift,
383
- )
384
- self.down_blocks.append(down_block)
385
-
386
- for _ in range(layers_per_block):
387
- controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
388
- controlnet_block = zero_module(controlnet_block)
389
- self.controlnet_down_blocks.append(controlnet_block)
390
-
391
- if not is_final_block:
392
- controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)
393
- controlnet_block = zero_module(controlnet_block)
394
- self.controlnet_down_blocks.append(controlnet_block)
395
-
396
- # mid
397
- mid_block_channel = block_out_channels[-1]
398
-
399
- controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)
400
- controlnet_block = zero_module(controlnet_block)
401
- self.controlnet_mid_block = controlnet_block
402
-
403
- self.mid_block = UNetMidBlock2DCrossAttn(
404
- transformer_layers_per_block=transformer_layers_per_block[-1],
405
- in_channels=mid_block_channel,
406
- temb_channels=time_embed_dim,
407
- resnet_eps=norm_eps,
408
- resnet_act_fn=act_fn,
409
- output_scale_factor=mid_block_scale_factor,
410
- resnet_time_scale_shift=resnet_time_scale_shift,
411
- cross_attention_dim=cross_attention_dim,
412
- num_attention_heads=num_attention_heads[-1],
413
- resnet_groups=norm_num_groups,
414
- use_linear_projection=use_linear_projection,
415
- upcast_attention=upcast_attention,
416
- )
417
-
418
- @classmethod
419
- def from_unet(
420
- cls,
421
- unet: UNet2DConditionModel,
422
- controlnet_conditioning_channel_order: str = "rgb",
423
- conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),
424
- load_weights_from_unet: bool = True,
425
- ):
426
- r"""
427
- Instantiate a [`ControlNetModel`] from [`UNet2DConditionModel`].
428
-
429
- Parameters:
430
- unet (`UNet2DConditionModel`):
431
- The UNet model weights to copy to the [`ControlNetModel`]. All configuration options are also copied
432
- where applicable.
433
- """
434
- transformer_layers_per_block = (
435
- unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1
436
- )
437
- encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None
438
- encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None
439
- addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None
440
- addition_time_embed_dim = (
441
- unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None
442
- )
443
-
444
- controlnet = cls(
445
- encoder_hid_dim=encoder_hid_dim,
446
- encoder_hid_dim_type=encoder_hid_dim_type,
447
- addition_embed_type=addition_embed_type,
448
- addition_time_embed_dim=addition_time_embed_dim,
449
- transformer_layers_per_block=transformer_layers_per_block,
450
- in_channels=unet.config.in_channels,
451
- flip_sin_to_cos=unet.config.flip_sin_to_cos,
452
- freq_shift=unet.config.freq_shift,
453
- down_block_types=unet.config.down_block_types,
454
- only_cross_attention=unet.config.only_cross_attention,
455
- block_out_channels=unet.config.block_out_channels,
456
- layers_per_block=unet.config.layers_per_block,
457
- downsample_padding=unet.config.downsample_padding,
458
- mid_block_scale_factor=unet.config.mid_block_scale_factor,
459
- act_fn=unet.config.act_fn,
460
- norm_num_groups=unet.config.norm_num_groups,
461
- norm_eps=unet.config.norm_eps,
462
- cross_attention_dim=unet.config.cross_attention_dim,
463
- attention_head_dim=unet.config.attention_head_dim,
464
- num_attention_heads=unet.config.num_attention_heads,
465
- use_linear_projection=unet.config.use_linear_projection,
466
- class_embed_type=unet.config.class_embed_type,
467
- num_class_embeds=unet.config.num_class_embeds,
468
- upcast_attention=unet.config.upcast_attention,
469
- resnet_time_scale_shift=unet.config.resnet_time_scale_shift,
470
- projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,
471
- controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
472
- conditioning_embedding_out_channels=conditioning_embedding_out_channels,
473
- )
474
-
475
- if load_weights_from_unet:
476
- controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())
477
- controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())
478
- controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())
479
-
480
- if controlnet.class_embedding:
481
- controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())
482
-
483
- controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())
484
- controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())
485
-
486
- return controlnet
487
-
488
- @property
489
- # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
490
- def attn_processors(self) -> Dict[str, AttentionProcessor]:
491
- r"""
492
- Returns:
493
- `dict` of attention processors: A dictionary containing all attention processors used in the model with
494
- indexed by its weight name.
495
- """
496
- # set recursively
497
- processors = {}
498
-
499
- def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
500
- if hasattr(module, "set_processor"):
501
- processors[f"{name}.processor"] = module.processor
502
-
503
- for sub_name, child in module.named_children():
504
- fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
505
-
506
- return processors
507
-
508
- for name, module in self.named_children():
509
- fn_recursive_add_processors(name, module, processors)
510
-
511
- return processors
512
-
513
- # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor
514
- def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
515
- r"""
516
- Sets the attention processor to use to compute attention.
517
-
518
- Parameters:
519
- processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
520
- The instantiated processor class or a dictionary of processor classes that will be set as the processor
521
- for **all** `Attention` layers.
522
-
523
- If `processor` is a dict, the key needs to define the path to the corresponding cross attention
524
- processor. This is strongly recommended when setting trainable attention processors.
525
-
526
- """
527
- count = len(self.attn_processors.keys())
528
-
529
- if isinstance(processor, dict) and len(processor) != count:
530
- raise ValueError(
531
- f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
532
- f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
533
- )
534
-
535
- def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
536
- if hasattr(module, "set_processor"):
537
- if not isinstance(processor, dict):
538
- module.set_processor(processor)
539
- else:
540
- module.set_processor(processor.pop(f"{name}.processor"))
541
-
542
- for sub_name, child in module.named_children():
543
- fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
544
-
545
- for name, module in self.named_children():
546
- fn_recursive_attn_processor(name, module, processor)
547
-
548
- # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
549
- def set_default_attn_processor(self):
550
- """
551
- Disables custom attention processors and sets the default attention implementation.
552
- """
553
- self.set_attn_processor(AttnProcessor())
554
-
555
- # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice
556
- def set_attention_slice(self, slice_size):
557
- r"""
558
- Enable sliced attention computation.
559
-
560
- When this option is enabled, the attention module splits the input tensor in slices to compute attention in
561
- several steps. This is useful for saving some memory in exchange for a small decrease in speed.
562
-
563
- Args:
564
- slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
565
- When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
566
- `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
567
- provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
568
- must be a multiple of `slice_size`.
569
- """
570
- sliceable_head_dims = []
571
-
572
- def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
573
- if hasattr(module, "set_attention_slice"):
574
- sliceable_head_dims.append(module.sliceable_head_dim)
575
-
576
- for child in module.children():
577
- fn_recursive_retrieve_sliceable_dims(child)
578
-
579
- # retrieve number of attention layers
580
- for module in self.children():
581
- fn_recursive_retrieve_sliceable_dims(module)
582
-
583
- num_sliceable_layers = len(sliceable_head_dims)
584
-
585
- if slice_size == "auto":
586
- # half the attention head size is usually a good trade-off between
587
- # speed and memory
588
- slice_size = [dim // 2 for dim in sliceable_head_dims]
589
- elif slice_size == "max":
590
- # make smallest slice possible
591
- slice_size = num_sliceable_layers * [1]
592
-
593
- slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
594
-
595
- if len(slice_size) != len(sliceable_head_dims):
596
- raise ValueError(
597
- f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
598
- f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
599
- )
600
-
601
- for i in range(len(slice_size)):
602
- size = slice_size[i]
603
- dim = sliceable_head_dims[i]
604
- if size is not None and size > dim:
605
- raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
606
-
607
- # Recursively walk through all the children.
608
- # Any children which exposes the set_attention_slice method
609
- # gets the message
610
- def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
611
- if hasattr(module, "set_attention_slice"):
612
- module.set_attention_slice(slice_size.pop())
613
-
614
- for child in module.children():
615
- fn_recursive_set_attention_slice(child, slice_size)
616
-
617
- reversed_slice_size = list(reversed(slice_size))
618
- for module in self.children():
619
- fn_recursive_set_attention_slice(module, reversed_slice_size)
620
-
621
- def _set_gradient_checkpointing(self, module, value=False):
622
- if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):
623
- module.gradient_checkpointing = value
624
-
625
- def forward(
626
- self,
627
- sample: torch.FloatTensor,
628
- timestep: Union[torch.Tensor, float, int],
629
- encoder_hidden_states: torch.Tensor,
630
- controlnet_cond: torch.FloatTensor,
631
- conditioning_scale: float = 1.0,
632
- class_labels: Optional[torch.Tensor] = None,
633
- timestep_cond: Optional[torch.Tensor] = None,
634
- attention_mask: Optional[torch.Tensor] = None,
635
- added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
636
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
637
- guess_mode: bool = False,
638
- return_dict: bool = True,
639
- ) -> Union[ControlNetOutput, Tuple]:
640
- """
641
- The [`ControlNetModel`] forward method.
642
-
643
- Args:
644
- sample (`torch.FloatTensor`):
645
- The noisy input tensor.
646
- timestep (`Union[torch.Tensor, float, int]`):
647
- The number of timesteps to denoise an input.
648
- encoder_hidden_states (`torch.Tensor`):
649
- The encoder hidden states.
650
- controlnet_cond (`torch.FloatTensor`):
651
- The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
652
- conditioning_scale (`float`, defaults to `1.0`):
653
- The scale factor for ControlNet outputs.
654
- class_labels (`torch.Tensor`, *optional*, defaults to `None`):
655
- Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
656
- timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
657
- attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
658
- added_cond_kwargs (`dict`):
659
- Additional conditions for the Stable Diffusion XL UNet.
660
- cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
661
- A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
662
- guess_mode (`bool`, defaults to `False`):
663
-                 In this mode, the ControlNet encoder tries its best to recognize the content of the input even if
664
- you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
665
- return_dict (`bool`, defaults to `True`):
666
- Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple.
667
-
668
- Returns:
669
- [`~models.controlnet.ControlNetOutput`] **or** `tuple`:
670
- If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is
671
- returned where the first element is the sample tensor.
672
- """
673
- # check channel order
674
- channel_order = self.config.controlnet_conditioning_channel_order
675
-
676
- if channel_order == "rgb":
677
- # in rgb order by default
678
- ...
679
- elif channel_order == "bgr":
680
- controlnet_cond = torch.flip(controlnet_cond, dims=[1])
681
- else:
682
- raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
683
-
684
- # prepare attention_mask
685
- if attention_mask is not None:
686
- attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
687
- attention_mask = attention_mask.unsqueeze(1)
688
-
689
- # 1. time
690
- timesteps = timestep
691
- if not torch.is_tensor(timesteps):
692
- # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
693
- # This would be a good case for the `match` statement (Python 3.10+)
694
- is_mps = sample.device.type == "mps"
695
- if isinstance(timestep, float):
696
- dtype = torch.float32 if is_mps else torch.float64
697
- else:
698
- dtype = torch.int32 if is_mps else torch.int64
699
- timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
700
- elif len(timesteps.shape) == 0:
701
- timesteps = timesteps[None].to(sample.device)
702
-
703
- # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
704
- timesteps = timesteps.expand(sample.shape[0])
705
-
706
- t_emb = self.time_proj(timesteps)
707
-
708
- # timesteps does not contain any weights and will always return f32 tensors
709
- # but time_embedding might actually be running in fp16. so we need to cast here.
710
- # there might be better ways to encapsulate this.
711
- t_emb = t_emb.to(dtype=sample.dtype)
712
-
713
- emb = self.time_embedding(t_emb, timestep_cond)
714
- aug_emb = None
715
-
716
- if self.class_embedding is not None:
717
- if class_labels is None:
718
- raise ValueError("class_labels should be provided when num_class_embeds > 0")
719
-
720
- if self.config.class_embed_type == "timestep":
721
- class_labels = self.time_proj(class_labels)
722
-
723
- class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)
724
- emb = emb + class_emb
725
-
726
- if "addition_embed_type" in self.config:
727
- if self.config.addition_embed_type == "text":
728
- aug_emb = self.add_embedding(encoder_hidden_states)
729
-
730
- elif self.config.addition_embed_type == "text_time":
731
- if "text_embeds" not in added_cond_kwargs:
732
- raise ValueError(
733
- f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
734
- )
735
- text_embeds = added_cond_kwargs.get("text_embeds")
736
- if "time_ids" not in added_cond_kwargs:
737
- raise ValueError(
738
- f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
739
- )
740
- time_ids = added_cond_kwargs.get("time_ids")
741
- time_embeds = self.add_time_proj(time_ids.flatten())
742
- time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
743
-
744
- add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
745
- add_embeds = add_embeds.to(emb.dtype)
746
- aug_emb = self.add_embedding(add_embeds)
747
-
748
- emb = emb + aug_emb if aug_emb is not None else emb
749
-
750
- # 2. pre-process
751
- sample = self.conv_in(sample)
752
-
753
- controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
754
- sample = sample + controlnet_cond
755
-
756
- # 3. down
757
- down_block_res_samples = (sample,)
758
- for downsample_block in self.down_blocks:
759
- if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
760
- sample, res_samples = downsample_block(
761
- hidden_states=sample,
762
- temb=emb,
763
- encoder_hidden_states=encoder_hidden_states,
764
- attention_mask=attention_mask,
765
- cross_attention_kwargs=cross_attention_kwargs,
766
- )
767
- else:
768
- sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
769
-
770
- down_block_res_samples += res_samples
771
-
772
- # 4. mid
773
- if self.mid_block is not None:
774
- sample = self.mid_block(
775
- sample,
776
- emb,
777
- encoder_hidden_states=encoder_hidden_states,
778
- attention_mask=attention_mask,
779
- cross_attention_kwargs=cross_attention_kwargs,
780
- )
781
-
782
- # 5. Control net blocks
783
-
784
- controlnet_down_block_res_samples = ()
785
-
786
- for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
787
- down_block_res_sample = controlnet_block(down_block_res_sample)
788
- controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,)
789
-
790
- down_block_res_samples = controlnet_down_block_res_samples
791
-
792
- mid_block_res_sample = self.controlnet_mid_block(sample)
793
-
794
- # 6. scaling
795
- if guess_mode and not self.config.global_pool_conditions:
796
- scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0
797
-
798
- scales = scales * conditioning_scale
799
- down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)]
800
- mid_block_res_sample = mid_block_res_sample * scales[-1] # last one
801
- else:
802
- down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
803
- mid_block_res_sample = mid_block_res_sample * conditioning_scale
804
-
805
- if self.config.global_pool_conditions:
806
- down_block_res_samples = [
807
- torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples
808
- ]
809
- mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True)
810
-
811
- if not return_dict:
812
- return (down_block_res_samples, mid_block_res_sample)
813
-
814
- return ControlNetOutput(
815
- down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
816
- )
817
-
818
-
819
- def zero_module(module):
820
- for p in module.parameters():
821
- nn.init.zeros_(p)
822
- return module
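
The deleted `ControlNetModel` block above documents the `set_attn_processor`, `set_default_attn_processor`, and `set_attention_slice` hooks. A minimal sketch of how those hooks are typically exercised on a pretrained checkpoint is shown below; the checkpoint id is only an assumed example to illustrate the call pattern, not part of the deleted file.

```python
# Minimal sketch (not part of the deleted file): exercising the attention hooks
# documented above. "lllyasviel/sd-controlnet-canny" is an assumed example id;
# any ControlNetModel checkpoint is used the same way.
import torch
from diffusers import ControlNetModel

controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
)

# "auto" halves each sliceable head dim, trading a little speed for memory.
controlnet.set_attention_slice("auto")

# Drop any custom processors and fall back to the default attention implementation.
controlnet.set_default_attn_processor()
```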
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/versatile_diffusion/pipeline_versatile_diffusion_text_to_image.py DELETED
@@ -1,469 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- import warnings
17
- from typing import Callable, List, Optional, Union
18
-
19
- import torch
20
- import torch.utils.checkpoint
21
- from transformers import CLIPImageProcessor, CLIPTextModelWithProjection, CLIPTokenizer
22
-
23
- from ...image_processor import VaeImageProcessor
24
- from ...models import AutoencoderKL, Transformer2DModel, UNet2DConditionModel
25
- from ...schedulers import KarrasDiffusionSchedulers
26
- from ...utils import logging, randn_tensor
27
- from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
28
- from .modeling_text_unet import UNetFlatConditionModel
29
-
30
-
31
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
32
-
33
-
34
- class VersatileDiffusionTextToImagePipeline(DiffusionPipeline):
35
- r"""
36
- Pipeline for text-to-image generation using Versatile Diffusion.
37
-
38
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
39
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
40
-
41
- Parameters:
42
- vqvae ([`VQModel`]):
43
- Vector-quantized (VQ) model to encode and decode images to and from latent representations.
44
- bert ([`LDMBertModel`]):
45
- Text-encoder model based on [`~transformers.BERT`].
46
- tokenizer ([`~transformers.BertTokenizer`]):
47
- A `BertTokenizer` to tokenize text.
48
- unet ([`UNet2DConditionModel`]):
49
- A `UNet2DConditionModel` to denoise the encoded image latents.
50
- scheduler ([`SchedulerMixin`]):
51
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
52
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
53
- """
54
- tokenizer: CLIPTokenizer
55
- image_feature_extractor: CLIPImageProcessor
56
- text_encoder: CLIPTextModelWithProjection
57
- image_unet: UNet2DConditionModel
58
- text_unet: UNetFlatConditionModel
59
- vae: AutoencoderKL
60
- scheduler: KarrasDiffusionSchedulers
61
-
62
- _optional_components = ["text_unet"]
63
-
64
- def __init__(
65
- self,
66
- tokenizer: CLIPTokenizer,
67
- text_encoder: CLIPTextModelWithProjection,
68
- image_unet: UNet2DConditionModel,
69
- text_unet: UNetFlatConditionModel,
70
- vae: AutoencoderKL,
71
- scheduler: KarrasDiffusionSchedulers,
72
- ):
73
- super().__init__()
74
- self.register_modules(
75
- tokenizer=tokenizer,
76
- text_encoder=text_encoder,
77
- image_unet=image_unet,
78
- text_unet=text_unet,
79
- vae=vae,
80
- scheduler=scheduler,
81
- )
82
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
83
- self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
84
-
85
- if self.text_unet is not None:
86
- self._swap_unet_attention_blocks()
87
-
88
- def _swap_unet_attention_blocks(self):
89
- """
90
- Swap the `Transformer2DModel` blocks between the image and text UNets
91
- """
92
- for name, module in self.image_unet.named_modules():
93
- if isinstance(module, Transformer2DModel):
94
- parent_name, index = name.rsplit(".", 1)
95
- index = int(index)
96
- self.image_unet.get_submodule(parent_name)[index], self.text_unet.get_submodule(parent_name)[index] = (
97
- self.text_unet.get_submodule(parent_name)[index],
98
- self.image_unet.get_submodule(parent_name)[index],
99
- )
100
-
101
- def remove_unused_weights(self):
102
- self.register_modules(text_unet=None)
103
-
104
- def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
105
- r"""
106
- Encodes the prompt into text encoder hidden states.
107
-
108
- Args:
109
- prompt (`str` or `List[str]`):
110
- prompt to be encoded
111
- device: (`torch.device`):
112
- torch device
113
- num_images_per_prompt (`int`):
114
- number of images that should be generated per prompt
115
- do_classifier_free_guidance (`bool`):
116
- whether to use classifier free guidance or not
117
- negative_prompt (`str` or `List[str]`):
118
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
119
- if `guidance_scale` is less than `1`).
120
- """
121
-
122
- def normalize_embeddings(encoder_output):
123
- embeds = self.text_encoder.text_projection(encoder_output.last_hidden_state)
124
- embeds_pooled = encoder_output.text_embeds
125
- embeds = embeds / torch.norm(embeds_pooled.unsqueeze(1), dim=-1, keepdim=True)
126
- return embeds
127
-
128
- batch_size = len(prompt) if isinstance(prompt, list) else 1
129
-
130
- text_inputs = self.tokenizer(
131
- prompt,
132
- padding="max_length",
133
- max_length=self.tokenizer.model_max_length,
134
- truncation=True,
135
- return_tensors="pt",
136
- )
137
- text_input_ids = text_inputs.input_ids
138
- untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids
139
-
140
- if not torch.equal(text_input_ids, untruncated_ids):
141
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
142
- logger.warning(
143
- "The following part of your input was truncated because CLIP can only handle sequences up to"
144
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
145
- )
146
-
147
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
148
- attention_mask = text_inputs.attention_mask.to(device)
149
- else:
150
- attention_mask = None
151
-
152
- prompt_embeds = self.text_encoder(
153
- text_input_ids.to(device),
154
- attention_mask=attention_mask,
155
- )
156
- prompt_embeds = normalize_embeddings(prompt_embeds)
157
-
158
- # duplicate text embeddings for each generation per prompt, using mps friendly method
159
- bs_embed, seq_len, _ = prompt_embeds.shape
160
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
161
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
162
-
163
- # get unconditional embeddings for classifier free guidance
164
- if do_classifier_free_guidance:
165
- uncond_tokens: List[str]
166
- if negative_prompt is None:
167
- uncond_tokens = [""] * batch_size
168
- elif type(prompt) is not type(negative_prompt):
169
- raise TypeError(
170
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
171
- f" {type(prompt)}."
172
- )
173
- elif isinstance(negative_prompt, str):
174
- uncond_tokens = [negative_prompt]
175
- elif batch_size != len(negative_prompt):
176
- raise ValueError(
177
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
178
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
179
- " the batch size of `prompt`."
180
- )
181
- else:
182
- uncond_tokens = negative_prompt
183
-
184
- max_length = text_input_ids.shape[-1]
185
- uncond_input = self.tokenizer(
186
- uncond_tokens,
187
- padding="max_length",
188
- max_length=max_length,
189
- truncation=True,
190
- return_tensors="pt",
191
- )
192
-
193
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
194
- attention_mask = uncond_input.attention_mask.to(device)
195
- else:
196
- attention_mask = None
197
-
198
- negative_prompt_embeds = self.text_encoder(
199
- uncond_input.input_ids.to(device),
200
- attention_mask=attention_mask,
201
- )
202
- negative_prompt_embeds = normalize_embeddings(negative_prompt_embeds)
203
-
204
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
205
- seq_len = negative_prompt_embeds.shape[1]
206
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
207
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
208
-
209
- # For classifier free guidance, we need to do two forward passes.
210
- # Here we concatenate the unconditional and text embeddings into a single batch
211
- # to avoid doing two forward passes
212
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
213
-
214
- return prompt_embeds
215
-
216
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
217
- def decode_latents(self, latents):
218
- warnings.warn(
219
- "The decode_latents method is deprecated and will be removed in a future version. Please"
220
- " use VaeImageProcessor instead",
221
- FutureWarning,
222
- )
223
- latents = 1 / self.vae.config.scaling_factor * latents
224
- image = self.vae.decode(latents, return_dict=False)[0]
225
- image = (image / 2 + 0.5).clamp(0, 1)
226
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
227
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
228
- return image
229
-
230
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
231
- def prepare_extra_step_kwargs(self, generator, eta):
232
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
233
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
234
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
235
- # and should be between [0, 1]
236
-
237
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
238
- extra_step_kwargs = {}
239
- if accepts_eta:
240
- extra_step_kwargs["eta"] = eta
241
-
242
- # check if the scheduler accepts generator
243
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
244
- if accepts_generator:
245
- extra_step_kwargs["generator"] = generator
246
- return extra_step_kwargs
247
-
248
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
249
- def check_inputs(
250
- self,
251
- prompt,
252
- height,
253
- width,
254
- callback_steps,
255
- negative_prompt=None,
256
- prompt_embeds=None,
257
- negative_prompt_embeds=None,
258
- ):
259
- if height % 8 != 0 or width % 8 != 0:
260
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
261
-
262
- if (callback_steps is None) or (
263
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
264
- ):
265
- raise ValueError(
266
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
267
- f" {type(callback_steps)}."
268
- )
269
-
270
- if prompt is not None and prompt_embeds is not None:
271
- raise ValueError(
272
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
273
- " only forward one of the two."
274
- )
275
- elif prompt is None and prompt_embeds is None:
276
- raise ValueError(
277
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
278
- )
279
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
280
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
281
-
282
- if negative_prompt is not None and negative_prompt_embeds is not None:
283
- raise ValueError(
284
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
285
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
286
- )
287
-
288
- if prompt_embeds is not None and negative_prompt_embeds is not None:
289
- if prompt_embeds.shape != negative_prompt_embeds.shape:
290
- raise ValueError(
291
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
292
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
293
- f" {negative_prompt_embeds.shape}."
294
- )
295
-
296
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
297
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
298
- shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
299
- if isinstance(generator, list) and len(generator) != batch_size:
300
- raise ValueError(
301
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
302
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
303
- )
304
-
305
- if latents is None:
306
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
307
- else:
308
- latents = latents.to(device)
309
-
310
- # scale the initial noise by the standard deviation required by the scheduler
311
- latents = latents * self.scheduler.init_noise_sigma
312
- return latents
313
-
314
- @torch.no_grad()
315
- def __call__(
316
- self,
317
- prompt: Union[str, List[str]],
318
- height: Optional[int] = None,
319
- width: Optional[int] = None,
320
- num_inference_steps: int = 50,
321
- guidance_scale: float = 7.5,
322
- negative_prompt: Optional[Union[str, List[str]]] = None,
323
- num_images_per_prompt: Optional[int] = 1,
324
- eta: float = 0.0,
325
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
326
- latents: Optional[torch.FloatTensor] = None,
327
- output_type: Optional[str] = "pil",
328
- return_dict: bool = True,
329
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
330
- callback_steps: int = 1,
331
- **kwargs,
332
- ):
333
- r"""
334
- The call function to the pipeline for generation.
335
-
336
- Args:
337
- prompt (`str` or `List[str]`):
338
- The prompt or prompts to guide image generation.
339
- height (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`):
340
- The height in pixels of the generated image.
341
- width (`int`, *optional*, defaults to `self.image_unet.config.sample_size * self.vae_scale_factor`):
342
- The width in pixels of the generated image.
343
- num_inference_steps (`int`, *optional*, defaults to 50):
344
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
345
- expense of slower inference.
346
- guidance_scale (`float`, *optional*, defaults to 7.5):
347
- A higher guidance scale value encourages the model to generate images closely linked to the text
348
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
349
- negative_prompt (`str` or `List[str]`, *optional*):
350
- The prompt or prompts to guide what to not include in image generation. If not defined, you need to
351
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
352
- num_images_per_prompt (`int`, *optional*, defaults to 1):
353
- The number of images to generate per prompt.
354
- eta (`float`, *optional*, defaults to 0.0):
355
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
356
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
357
- generator (`torch.Generator`, *optional*):
358
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
359
- generation deterministic.
360
- latents (`torch.FloatTensor`, *optional*):
361
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
362
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
363
- tensor is generated by sampling using the supplied random `generator`.
364
- output_type (`str`, *optional*, defaults to `"pil"`):
365
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
366
- return_dict (`bool`, *optional*, defaults to `True`):
367
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
368
- plain tuple.
369
- callback (`Callable`, *optional*):
370
- A function that calls every `callback_steps` steps during inference. The function is called with the
371
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
372
- callback_steps (`int`, *optional*, defaults to 1):
373
- The frequency at which the `callback` function is called. If not specified, the callback is called at
374
- every step.
375
-
376
- Examples:
377
-
378
- ```py
379
- >>> from diffusers import VersatileDiffusionTextToImagePipeline
380
- >>> import torch
381
-
382
- >>> pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
383
- ... "shi-labs/versatile-diffusion", torch_dtype=torch.float16
384
- ... )
385
- >>> pipe.remove_unused_weights()
386
- >>> pipe = pipe.to("cuda")
387
-
388
- >>> generator = torch.Generator(device="cuda").manual_seed(0)
389
- >>> image = pipe("an astronaut riding on a horse on mars", generator=generator).images[0]
390
- >>> image.save("./astronaut.png")
391
- ```
392
-
393
- Returns:
394
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
395
- If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
396
- otherwise a `tuple` is returned where the first element is a list with the generated images.
397
- """
398
- # 0. Default height and width to unet
399
- height = height or self.image_unet.config.sample_size * self.vae_scale_factor
400
- width = width or self.image_unet.config.sample_size * self.vae_scale_factor
401
-
402
- # 1. Check inputs. Raise error if not correct
403
- self.check_inputs(prompt, height, width, callback_steps)
404
-
405
- # 2. Define call parameters
406
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
407
- device = self._execution_device
408
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
409
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
410
- # corresponds to doing no classifier free guidance.
411
- do_classifier_free_guidance = guidance_scale > 1.0
412
-
413
- # 3. Encode input prompt
414
- prompt_embeds = self._encode_prompt(
415
- prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
416
- )
417
-
418
- # 4. Prepare timesteps
419
- self.scheduler.set_timesteps(num_inference_steps, device=device)
420
- timesteps = self.scheduler.timesteps
421
-
422
- # 5. Prepare latent variables
423
- num_channels_latents = self.image_unet.config.in_channels
424
- latents = self.prepare_latents(
425
- batch_size * num_images_per_prompt,
426
- num_channels_latents,
427
- height,
428
- width,
429
- prompt_embeds.dtype,
430
- device,
431
- generator,
432
- latents,
433
- )
434
-
435
- # 6. Prepare extra step kwargs.
436
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
437
-
438
- # 7. Denoising loop
439
- for i, t in enumerate(self.progress_bar(timesteps)):
440
- # expand the latents if we are doing classifier free guidance
441
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
442
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
443
-
444
- # predict the noise residual
445
- noise_pred = self.image_unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
446
-
447
- # perform guidance
448
- if do_classifier_free_guidance:
449
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
450
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
451
-
452
- # compute the previous noisy sample x_t -> x_t-1
453
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
454
-
455
- # call the callback, if provided
456
- if callback is not None and i % callback_steps == 0:
457
- callback(i, t, latents)
458
-
459
- if not output_type == "latent":
460
- image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
461
- else:
462
- image = latents
463
-
464
- image = self.image_processor.postprocess(image, output_type=output_type)
465
-
466
- if not return_dict:
467
- return (image,)
468
-
469
- return ImagePipelineOutput(images=image)
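
The pipeline above filters scheduler-specific arguments (`eta`, `generator`) through `inspect.signature` in `prepare_extra_step_kwargs`. A standalone sketch of that filtering pattern follows; `fake_step` is a stand-in function for illustration, not the real scheduler API.

```python
# Standalone sketch of the signature-filtering pattern used by
# prepare_extra_step_kwargs above. `fake_step` is a stand-in, not a real scheduler.
import inspect

def filter_step_kwargs(step_fn, **candidate_kwargs):
    # Keep only the keyword arguments that the step function actually accepts.
    accepted = set(inspect.signature(step_fn).parameters.keys())
    return {k: v for k, v in candidate_kwargs.items() if k in accepted}

def fake_step(model_output, timestep, sample, eta=0.0):
    return sample

# `eta` is kept because fake_step accepts it; `generator` is dropped.
print(filter_step_kwargs(fake_step, eta=0.3, generator=None))  # {'eta': 0.3}
```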
spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py DELETED
@@ -1,15 +0,0 @@
- _base_ = 'faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
-
- # learning policy
- lr_config = dict(
-     policy='step',
-     warmup='linear',
-     warmup_iters=500,
-     warmup_ratio=0.001,
-     step=[60000, 80000])
-
- # Runner type
- runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000)
-
- checkpoint_config = dict(interval=10000)
- evaluation = dict(interval=10000, metric='bbox')
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/grid_head.py DELETED
@@ -1,359 +0,0 @@
1
- import numpy as np
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
- from mmcv.cnn import ConvModule, kaiming_init, normal_init
6
-
7
- from mmdet.models.builder import HEADS, build_loss
8
-
9
-
10
- @HEADS.register_module()
11
- class GridHead(nn.Module):
12
-
13
- def __init__(self,
14
- grid_points=9,
15
- num_convs=8,
16
- roi_feat_size=14,
17
- in_channels=256,
18
- conv_kernel_size=3,
19
- point_feat_channels=64,
20
- deconv_kernel_size=4,
21
- class_agnostic=False,
22
- loss_grid=dict(
23
- type='CrossEntropyLoss', use_sigmoid=True,
24
- loss_weight=15),
25
- conv_cfg=None,
26
- norm_cfg=dict(type='GN', num_groups=36)):
27
- super(GridHead, self).__init__()
28
- self.grid_points = grid_points
29
- self.num_convs = num_convs
30
- self.roi_feat_size = roi_feat_size
31
- self.in_channels = in_channels
32
- self.conv_kernel_size = conv_kernel_size
33
- self.point_feat_channels = point_feat_channels
34
- self.conv_out_channels = self.point_feat_channels * self.grid_points
35
- self.class_agnostic = class_agnostic
36
- self.conv_cfg = conv_cfg
37
- self.norm_cfg = norm_cfg
38
- if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':
39
- assert self.conv_out_channels % norm_cfg['num_groups'] == 0
40
-
41
- assert self.grid_points >= 4
42
- self.grid_size = int(np.sqrt(self.grid_points))
43
- if self.grid_size * self.grid_size != self.grid_points:
44
- raise ValueError('grid_points must be a square number')
45
-
46
- # the predicted heatmap is half of whole_map_size
47
- if not isinstance(self.roi_feat_size, int):
48
- raise ValueError('Only square RoIs are supported in Grid R-CNN')
49
- self.whole_map_size = self.roi_feat_size * 4
50
-
51
- # compute point-wise sub-regions
52
- self.sub_regions = self.calc_sub_regions()
53
-
54
- self.convs = []
55
- for i in range(self.num_convs):
56
- in_channels = (
57
- self.in_channels if i == 0 else self.conv_out_channels)
58
- stride = 2 if i == 0 else 1
59
- padding = (self.conv_kernel_size - 1) // 2
60
- self.convs.append(
61
- ConvModule(
62
- in_channels,
63
- self.conv_out_channels,
64
- self.conv_kernel_size,
65
- stride=stride,
66
- padding=padding,
67
- conv_cfg=self.conv_cfg,
68
- norm_cfg=self.norm_cfg,
69
- bias=True))
70
- self.convs = nn.Sequential(*self.convs)
71
-
72
- self.deconv1 = nn.ConvTranspose2d(
73
- self.conv_out_channels,
74
- self.conv_out_channels,
75
- kernel_size=deconv_kernel_size,
76
- stride=2,
77
- padding=(deconv_kernel_size - 2) // 2,
78
- groups=grid_points)
79
- self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)
80
- self.deconv2 = nn.ConvTranspose2d(
81
- self.conv_out_channels,
82
- grid_points,
83
- kernel_size=deconv_kernel_size,
84
- stride=2,
85
- padding=(deconv_kernel_size - 2) // 2,
86
- groups=grid_points)
87
-
88
- # find the 4-neighbor of each grid point
89
- self.neighbor_points = []
90
- grid_size = self.grid_size
91
- for i in range(grid_size): # i-th column
92
- for j in range(grid_size): # j-th row
93
- neighbors = []
94
- if i > 0: # left: (i - 1, j)
95
- neighbors.append((i - 1) * grid_size + j)
96
- if j > 0: # up: (i, j - 1)
97
- neighbors.append(i * grid_size + j - 1)
98
- if j < grid_size - 1: # down: (i, j + 1)
99
- neighbors.append(i * grid_size + j + 1)
100
- if i < grid_size - 1: # right: (i + 1, j)
101
- neighbors.append((i + 1) * grid_size + j)
102
- self.neighbor_points.append(tuple(neighbors))
103
- # total edges in the grid
104
- self.num_edges = sum([len(p) for p in self.neighbor_points])
105
-
106
- self.forder_trans = nn.ModuleList() # first-order feature transition
107
- self.sorder_trans = nn.ModuleList() # second-order feature transition
108
- for neighbors in self.neighbor_points:
109
- fo_trans = nn.ModuleList()
110
- so_trans = nn.ModuleList()
111
- for _ in range(len(neighbors)):
112
- # each transition module consists of a 5x5 depth-wise conv and
113
- # 1x1 conv.
114
- fo_trans.append(
115
- nn.Sequential(
116
- nn.Conv2d(
117
- self.point_feat_channels,
118
- self.point_feat_channels,
119
- 5,
120
- stride=1,
121
- padding=2,
122
- groups=self.point_feat_channels),
123
- nn.Conv2d(self.point_feat_channels,
124
- self.point_feat_channels, 1)))
125
- so_trans.append(
126
- nn.Sequential(
127
- nn.Conv2d(
128
- self.point_feat_channels,
129
- self.point_feat_channels,
130
- 5,
131
- 1,
132
- 2,
133
- groups=self.point_feat_channels),
134
- nn.Conv2d(self.point_feat_channels,
135
- self.point_feat_channels, 1)))
136
- self.forder_trans.append(fo_trans)
137
- self.sorder_trans.append(so_trans)
138
-
139
- self.loss_grid = build_loss(loss_grid)
140
-
141
- def init_weights(self):
142
- for m in self.modules():
143
- if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
144
- # TODO: compare mode = "fan_in" or "fan_out"
145
- kaiming_init(m)
146
- for m in self.modules():
147
- if isinstance(m, nn.ConvTranspose2d):
148
- normal_init(m, std=0.001)
149
- nn.init.constant_(self.deconv2.bias, -np.log(0.99 / 0.01))
150
-
151
- def forward(self, x):
152
- assert x.shape[-1] == x.shape[-2] == self.roi_feat_size
153
- # RoI feature transformation, downsample 2x
154
- x = self.convs(x)
155
-
156
- c = self.point_feat_channels
157
- # first-order fusion
158
- x_fo = [None for _ in range(self.grid_points)]
159
- for i, points in enumerate(self.neighbor_points):
160
- x_fo[i] = x[:, i * c:(i + 1) * c]
161
- for j, point_idx in enumerate(points):
162
- x_fo[i] = x_fo[i] + self.forder_trans[i][j](
163
- x[:, point_idx * c:(point_idx + 1) * c])
164
-
165
- # second-order fusion
166
- x_so = [None for _ in range(self.grid_points)]
167
- for i, points in enumerate(self.neighbor_points):
168
- x_so[i] = x[:, i * c:(i + 1) * c]
169
- for j, point_idx in enumerate(points):
170
- x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx])
171
-
172
- # predicted heatmap with fused features
173
- x2 = torch.cat(x_so, dim=1)
174
- x2 = self.deconv1(x2)
175
- x2 = F.relu(self.norm1(x2), inplace=True)
176
- heatmap = self.deconv2(x2)
177
-
178
- # predicted heatmap with original features (applicable during training)
179
- if self.training:
180
- x1 = x
181
- x1 = self.deconv1(x1)
182
- x1 = F.relu(self.norm1(x1), inplace=True)
183
- heatmap_unfused = self.deconv2(x1)
184
- else:
185
- heatmap_unfused = heatmap
186
-
187
- return dict(fused=heatmap, unfused=heatmap_unfused)
188
-
189
- def calc_sub_regions(self):
190
- """Compute point specific representation regions.
191
-
192
- See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details.
193
- """
194
- # to make it consistent with the original implementation, half_size
195
- # is computed as 2 * quarter_size, which is smaller
196
- half_size = self.whole_map_size // 4 * 2
197
- sub_regions = []
198
- for i in range(self.grid_points):
199
- x_idx = i // self.grid_size
200
- y_idx = i % self.grid_size
201
- if x_idx == 0:
202
- sub_x1 = 0
203
- elif x_idx == self.grid_size - 1:
204
- sub_x1 = half_size
205
- else:
206
- ratio = x_idx / (self.grid_size - 1) - 0.25
207
- sub_x1 = max(int(ratio * self.whole_map_size), 0)
208
-
209
- if y_idx == 0:
210
- sub_y1 = 0
211
- elif y_idx == self.grid_size - 1:
212
- sub_y1 = half_size
213
- else:
214
- ratio = y_idx / (self.grid_size - 1) - 0.25
215
- sub_y1 = max(int(ratio * self.whole_map_size), 0)
216
- sub_regions.append(
217
- (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))
218
- return sub_regions
219
-
220
- def get_targets(self, sampling_results, rcnn_train_cfg):
221
- # mix all samples (across images) together.
222
- pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results],
223
- dim=0).cpu()
224
- pos_gt_bboxes = torch.cat(
225
- [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu()
226
- assert pos_bboxes.shape == pos_gt_bboxes.shape
227
-
228
- # expand pos_bboxes to 2x of original size
229
- x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
230
- y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
231
- x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
232
- y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
233
- pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
234
- pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1)
235
- pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1)
236
-
237
- num_rois = pos_bboxes.shape[0]
238
- map_size = self.whole_map_size
239
- # this is not the final target shape
240
- targets = torch.zeros((num_rois, self.grid_points, map_size, map_size),
241
- dtype=torch.float)
242
-
243
- # pre-compute interpolation factors for all grid points.
244
- # the first item is the factor of x-dim, and the second is y-dim.
245
- # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1)
246
- factors = []
247
- for j in range(self.grid_points):
248
- x_idx = j // self.grid_size
249
- y_idx = j % self.grid_size
250
- factors.append((1 - x_idx / (self.grid_size - 1),
251
- 1 - y_idx / (self.grid_size - 1)))
252
-
253
- radius = rcnn_train_cfg.pos_radius
254
- radius2 = radius**2
255
- for i in range(num_rois):
256
- # ignore small bboxes
257
- if (pos_bbox_ws[i] <= self.grid_size
258
- or pos_bbox_hs[i] <= self.grid_size):
259
- continue
260
- # for each grid point, mark a small circle as positive
261
- for j in range(self.grid_points):
262
- factor_x, factor_y = factors[j]
263
- gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + (
264
- 1 - factor_x) * pos_gt_bboxes[i, 2]
265
- gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + (
266
- 1 - factor_y) * pos_gt_bboxes[i, 3]
267
-
268
- cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] *
269
- map_size)
270
- cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] *
271
- map_size)
272
-
273
- for x in range(cx - radius, cx + radius + 1):
274
- for y in range(cy - radius, cy + radius + 1):
275
- if x >= 0 and x < map_size and y >= 0 and y < map_size:
276
- if (x - cx)**2 + (y - cy)**2 <= radius2:
277
- targets[i, j, y, x] = 1
278
- # reduce the target heatmap size by a half
279
- # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688).
280
- sub_targets = []
281
- for i in range(self.grid_points):
282
- sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i]
283
- sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2])
284
- sub_targets = torch.cat(sub_targets, dim=1)
285
- sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device)
286
- return sub_targets
287
-
288
- def loss(self, grid_pred, grid_targets):
289
- loss_fused = self.loss_grid(grid_pred['fused'], grid_targets)
290
- loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets)
291
- loss_grid = loss_fused + loss_unfused
292
- return dict(loss_grid=loss_grid)
293
-
294
- def get_bboxes(self, det_bboxes, grid_pred, img_metas):
295
- # TODO: refactoring
296
- assert det_bboxes.shape[0] == grid_pred.shape[0]
297
- det_bboxes = det_bboxes.cpu()
298
- cls_scores = det_bboxes[:, [4]]
299
- det_bboxes = det_bboxes[:, :4]
300
- grid_pred = grid_pred.sigmoid().cpu()
301
-
302
- R, c, h, w = grid_pred.shape
303
- half_size = self.whole_map_size // 4 * 2
304
- assert h == w == half_size
305
- assert c == self.grid_points
306
-
307
- # find the point with max scores in the half-sized heatmap
308
- grid_pred = grid_pred.view(R * c, h * w)
309
- pred_scores, pred_position = grid_pred.max(dim=1)
310
- xs = pred_position % w
311
- ys = pred_position // w
312
-
313
- # get the position in the whole heatmap instead of half-sized heatmap
314
- for i in range(self.grid_points):
315
- xs[i::self.grid_points] += self.sub_regions[i][0]
316
- ys[i::self.grid_points] += self.sub_regions[i][1]
317
-
318
- # reshape to (num_rois, grid_points)
319
- pred_scores, xs, ys = tuple(
320
- map(lambda x: x.view(R, c), [pred_scores, xs, ys]))
321
-
322
- # get expanded pos_bboxes
323
- widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1)
324
- heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1)
325
- x1 = (det_bboxes[:, 0, None] - widths / 2)
326
- y1 = (det_bboxes[:, 1, None] - heights / 2)
327
- # map the grid point to the absolute coordinates
328
- abs_xs = (xs.float() + 0.5) / w * widths + x1
329
- abs_ys = (ys.float() + 0.5) / h * heights + y1
330
-
331
- # get the grid points indices that fall on the bbox boundaries
332
- x1_inds = [i for i in range(self.grid_size)]
333
- y1_inds = [i * self.grid_size for i in range(self.grid_size)]
334
- x2_inds = [
335
- self.grid_points - self.grid_size + i
336
- for i in range(self.grid_size)
337
- ]
338
- y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)]
339
-
340
- # voting of all grid points on some boundary
341
- bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum(
342
- dim=1, keepdim=True) / (
343
- pred_scores[:, x1_inds].sum(dim=1, keepdim=True))
344
- bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum(
345
- dim=1, keepdim=True) / (
346
- pred_scores[:, y1_inds].sum(dim=1, keepdim=True))
347
- bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum(
348
- dim=1, keepdim=True) / (
349
- pred_scores[:, x2_inds].sum(dim=1, keepdim=True))
350
- bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum(
351
- dim=1, keepdim=True) / (
352
- pred_scores[:, y2_inds].sum(dim=1, keepdim=True))
353
-
354
- bbox_res = torch.cat(
355
- [bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1)
356
- bbox_res[:, [0, 2]].clamp_(min=0, max=img_metas[0]['img_shape'][1])
357
- bbox_res[:, [1, 3]].clamp_(min=0, max=img_metas[0]['img_shape'][0])
358
-
359
- return bbox_res
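
`GridHead` above builds a 4-neighbour list per grid point and counts the resulting edges (`num_edges`). A self-contained sketch of that bookkeeping for the default 3x3 grid (9 points, 24 directed neighbour links) is:

```python
# Self-contained sketch of the neighbour bookkeeping in GridHead above,
# for the default 3x3 grid (grid_points=9).
grid_size = 3
neighbor_points = []
for i in range(grid_size):          # i-th column
    for j in range(grid_size):      # j-th row
        neighbors = []
        if i > 0:                   # left neighbour: (i - 1, j)
            neighbors.append((i - 1) * grid_size + j)
        if j > 0:                   # up neighbour: (i, j - 1)
            neighbors.append(i * grid_size + j - 1)
        if j < grid_size - 1:       # down neighbour: (i, j + 1)
            neighbors.append(i * grid_size + j + 1)
        if i < grid_size - 1:       # right neighbour: (i + 1, j)
            neighbors.append((i + 1) * grid_size + j)
        neighbor_points.append(tuple(neighbors))

print(len(neighbor_points))                      # 9 grid points
print(sum(len(p) for p in neighbor_points))      # 24 neighbour links (num_edges)
```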
spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r50-d8_512x512_40k_voc12aug.py DELETED
@@ -1,7 +0,0 @@
- _base_ = [
-     '../_base_/models/gcnet_r50-d8.py',
-     '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
-     '../_base_/schedules/schedule_40k.py'
- ]
- model = dict(
-     decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
spaces/Andy1621/uniformer_image_segmentation/configs/resnest/README.md DELETED
@@ -1,34 +0,0 @@
1
- # ResNeSt: Split-Attention Networks
2
-
3
- ## Introduction
4
-
5
- <!-- [ALGORITHM] -->
6
-
7
- ```latex
8
- @article{zhang2020resnest,
9
- title={ResNeSt: Split-Attention Networks},
10
- author={Zhang, Hang and Wu, Chongruo and Zhang, Zhongyue and Zhu, Yi and Zhang, Zhi and Lin, Haibin and Sun, Yue and He, Tong and Muller, Jonas and Manmatha, R. and Li, Mu and Smola, Alexander},
11
- journal={arXiv preprint arXiv:2004.08955},
12
- year={2020}
13
- }
14
- ```
15
-
16
- ## Results and models
17
-
18
- ### Cityscapes
19
-
20
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
21
- | ---------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ----------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
22
- | FCN | S-101-D8 | 512x1024 | 80000 | 11.4 | 2.39 | 77.56 | 78.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/fcn_s101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x1024_80k_cityscapes/fcn_s101-d8_512x1024_80k_cityscapes_20200807_140631-f8d155b3.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x1024_80k_cityscapes/fcn_s101-d8_512x1024_80k_cityscapes-20200807_140631.log.json) |
23
- | PSPNet | S-101-D8 | 512x1024 | 80000 | 11.8 | 2.52 | 78.57 | 79.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x1024_80k_cityscapes/pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x1024_80k_cityscapes/pspnet_s101-d8_512x1024_80k_cityscapes-20200807_140631.log.json) |
24
- | DeepLabV3 | S-101-D8 | 512x1024 | 80000 | 11.9 | 1.88 | 79.67 | 80.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes/deeplabv3_s101-d8_512x1024_80k_cityscapes_20200807_144429-b73c4270.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes/deeplabv3_s101-d8_512x1024_80k_cityscapes-20200807_144429.log.json) |
25
- | DeepLabV3+ | S-101-D8 | 512x1024 | 80000 | 13.2 | 2.36 | 79.62 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes/deeplabv3plus_s101-d8_512x1024_80k_cityscapes_20200807_144429-1239eb43.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes/deeplabv3plus_s101-d8_512x1024_80k_cityscapes-20200807_144429.log.json) |
26
-
27
- ### ADE20k
28
-
29
- | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
30
- | ---------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
31
- | FCN | S-101-D8 | 512x512 | 160000 | 14.2 | 12.86 | 45.62 | 46.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/fcn_s101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x512_160k_ade20k/fcn_s101-d8_512x512_160k_ade20k_20200807_145416-d3160329.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x512_160k_ade20k/fcn_s101-d8_512x512_160k_ade20k-20200807_145416.log.json) |
32
- | PSPNet | S-101-D8 | 512x512 | 160000 | 14.2 | 13.02 | 45.44 | 46.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x512_160k_ade20k/pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x512_160k_ade20k/pspnet_s101-d8_512x512_160k_ade20k-20200807_145416.log.json) |
33
- | DeepLabV3 | S-101-D8 | 512x512 | 160000 | 14.6 | 9.28 | 45.71 | 46.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/deeplabv3_s101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x512_160k_ade20k/deeplabv3_s101-d8_512x512_160k_ade20k_20200807_144503-17ecabe5.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x512_160k_ade20k/deeplabv3_s101-d8_512x512_160k_ade20k-20200807_144503.log.json) |
34
- | DeepLabV3+ | S-101-D8 | 512x512 | 160000 | 16.2 | 11.96 | 46.47 | 47.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k/deeplabv3plus_s101-d8_512x512_160k_ade20k_20200807_144503-27b26226.pth) &#124; [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k/deeplabv3plus_s101-d8_512x512_160k_ade20k-20200807_144503.log.json) |
spaces/Anew1007/extras/server.py DELETED
@@ -1,964 +0,0 @@
1
- from functools import wraps
2
- from flask import (
3
- Flask,
4
- jsonify,
5
- request,
6
- Response,
7
- render_template_string,
8
- abort,
9
- send_from_directory,
10
- send_file,
11
- )
12
- from flask_cors import CORS
13
- from flask_compress import Compress
14
- import markdown
15
- import argparse
16
- from transformers import AutoTokenizer, AutoProcessor, pipeline
17
- from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM
18
- from transformers import BlipForConditionalGeneration
19
- import unicodedata
20
- import torch
21
- import time
22
- import os
23
- import gc
24
- import sys
25
- import secrets
26
- from PIL import Image
27
- import base64
28
- from io import BytesIO
29
- from random import randint
30
- import webuiapi
31
- import hashlib
32
- from constants import *
33
- from colorama import Fore, Style, init as colorama_init
34
-
35
- colorama_init()
36
-
37
- if sys.hexversion < 0x030b0000:
38
- print(f"{Fore.BLUE}{Style.BRIGHT}Python 3.11 or newer is recommended to run this program.{Style.RESET_ALL}")
39
- time.sleep(2)
40
-
41
- class SplitArgs(argparse.Action):
42
- def __call__(self, parser, namespace, values, option_string=None):
43
- setattr(
44
- namespace, self.dest, values.replace('"', "").replace("'", "").split(",")
45
- )
46
-
47
- #Setting Root Folders for Silero Generations so it is compatible with STSL, should not effect regular runs. - Rolyat
48
- parent_dir = os.path.dirname(os.path.abspath(__file__))
49
- SILERO_SAMPLES_PATH = os.path.join(parent_dir, "tts_samples")
50
- SILERO_SAMPLE_TEXT = os.path.join(parent_dir)
51
-
52
- # Create directories if they don't exist
53
- if not os.path.exists(SILERO_SAMPLES_PATH):
54
- os.makedirs(SILERO_SAMPLES_PATH)
55
- if not os.path.exists(SILERO_SAMPLE_TEXT):
56
- os.makedirs(SILERO_SAMPLE_TEXT)
57
-
58
- # Script arguments
59
- parser = argparse.ArgumentParser(
60
- prog="SillyTavern Extras", description="Web API for transformers models"
61
- )
62
- parser.add_argument(
63
- "--port", type=int, help="Specify the port on which the application is hosted"
64
- )
65
- parser.add_argument(
66
- "--listen", action="store_true", help="Host the app on the local network"
67
- )
68
- parser.add_argument(
69
- "--share", action="store_true", help="Share the app on CloudFlare tunnel"
70
- )
71
- parser.add_argument("--cpu", action="store_true", help="Run the models on the CPU")
72
- parser.add_argument("--cuda", action="store_false", dest="cpu", help="Run the models on the GPU")
73
- parser.add_argument("--cuda-device", help="Specify the CUDA device to use")
74
- parser.add_argument("--mps", "--apple", "--m1", "--m2", action="store_false", dest="cpu", help="Run the models on Apple Silicon")
75
- parser.set_defaults(cpu=True)
76
- parser.add_argument("--summarization-model", help="Load a custom summarization model")
77
- parser.add_argument(
78
- "--classification-model", help="Load a custom text classification model"
79
- )
80
- parser.add_argument("--captioning-model", help="Load a custom captioning model")
81
- parser.add_argument("--embedding-model", help="Load a custom text embedding model")
82
- parser.add_argument("--chroma-host", help="Host IP for a remote ChromaDB instance")
83
- parser.add_argument("--chroma-port", help="HTTP port for a remote ChromaDB instance (defaults to 8000)")
84
- parser.add_argument("--chroma-folder", help="Path for chromadb persistence folder", default='.chroma_db')
85
- parser.add_argument('--chroma-persist', help="ChromaDB persistence", default=True, action=argparse.BooleanOptionalAction)
86
- parser.add_argument(
87
- "--secure", action="store_true", help="Enforces the use of an API key"
88
- )
89
- sd_group = parser.add_mutually_exclusive_group()
90
-
91
- local_sd = sd_group.add_argument_group("sd-local")
92
- local_sd.add_argument("--sd-model", help="Load a custom SD image generation model")
93
- local_sd.add_argument("--sd-cpu", help="Force the SD pipeline to run on the CPU", action="store_true")
94
-
95
- remote_sd = sd_group.add_argument_group("sd-remote")
96
- remote_sd.add_argument(
97
- "--sd-remote", action="store_true", help="Use a remote backend for SD"
98
- )
99
- remote_sd.add_argument(
100
- "--sd-remote-host", type=str, help="Specify the host of the remote SD backend"
101
- )
102
- remote_sd.add_argument(
103
- "--sd-remote-port", type=int, help="Specify the port of the remote SD backend"
104
- )
105
- remote_sd.add_argument(
106
- "--sd-remote-ssl", action="store_true", help="Use SSL for the remote SD backend"
107
- )
108
- remote_sd.add_argument(
109
- "--sd-remote-auth",
110
- type=str,
111
- help="Specify the username:password for the remote SD backend (if required)",
112
- )
113
-
114
- parser.add_argument(
115
- "--enable-modules",
116
- action=SplitArgs,
117
- default=[],
118
- help="Override a list of enabled modules",
119
- )
120
-
121
- args = parser.parse_args()
122
- # [HF, Huggingface] Set port to 7860, set host to remote.
123
- port = 7860
124
- host = "0.0.0.0"
125
- summarization_model = (
126
- args.summarization_model
127
- if args.summarization_model
128
- else DEFAULT_SUMMARIZATION_MODEL
129
- )
130
- classification_model = (
131
- args.classification_model
132
- if args.classification_model
133
- else DEFAULT_CLASSIFICATION_MODEL
134
- )
135
- captioning_model = (
136
- args.captioning_model if args.captioning_model else DEFAULT_CAPTIONING_MODEL
137
- )
138
- embedding_model = (
139
- args.embedding_model if args.embedding_model else DEFAULT_EMBEDDING_MODEL
140
- )
141
-
142
- sd_use_remote = False if args.sd_model else True
143
- sd_model = args.sd_model if args.sd_model else DEFAULT_SD_MODEL
144
- sd_remote_host = args.sd_remote_host if args.sd_remote_host else DEFAULT_REMOTE_SD_HOST
145
- sd_remote_port = args.sd_remote_port if args.sd_remote_port else DEFAULT_REMOTE_SD_PORT
146
- sd_remote_ssl = args.sd_remote_ssl
147
- sd_remote_auth = args.sd_remote_auth
148
-
149
- modules = (
150
- args.enable_modules if args.enable_modules and len(args.enable_modules) > 0 else []
151
- )
152
-
153
- if len(modules) == 0:
154
- print(
155
- f"{Fore.RED}{Style.BRIGHT}You did not select any modules to run! Choose them by adding an --enable-modules option"
156
- )
157
- print(f"Example: --enable-modules=caption,summarize{Style.RESET_ALL}")
158
-
159
- # Models init
160
- cuda_device = DEFAULT_CUDA_DEVICE if not args.cuda_device else args.cuda_device
161
- device_string = cuda_device if torch.cuda.is_available() and not args.cpu else 'mps' if torch.backends.mps.is_available() and not args.cpu else 'cpu'
162
- device = torch.device(device_string)
163
- torch_dtype = torch.float32 if device_string != cuda_device else torch.float16
164
-
165
- if not torch.cuda.is_available() and not args.cpu:
166
- print(f"{Fore.YELLOW}{Style.BRIGHT}torch-cuda is not supported on this device.{Style.RESET_ALL}")
167
- if not torch.backends.mps.is_available() and not args.cpu:
168
- print(f"{Fore.YELLOW}{Style.BRIGHT}torch-mps is not supported on this device.{Style.RESET_ALL}")
169
-
170
-
171
- print(f"{Fore.GREEN}{Style.BRIGHT}Using torch device: {device_string}{Style.RESET_ALL}")
172
-
173
- if "caption" in modules:
174
- print("Initializing an image captioning model...")
175
- captioning_processor = AutoProcessor.from_pretrained(captioning_model)
176
- if "blip" in captioning_model:
177
- captioning_transformer = BlipForConditionalGeneration.from_pretrained(
178
- captioning_model, torch_dtype=torch_dtype
179
- ).to(device)
180
- else:
181
- captioning_transformer = AutoModelForCausalLM.from_pretrained(
182
- captioning_model, torch_dtype=torch_dtype
183
- ).to(device)
184
-
185
- if "summarize" in modules:
186
- print("Initializing a text summarization model...")
187
- summarization_tokenizer = AutoTokenizer.from_pretrained(summarization_model)
188
- summarization_transformer = AutoModelForSeq2SeqLM.from_pretrained(
189
- summarization_model, torch_dtype=torch_dtype
190
- ).to(device)
191
-
192
- if "classify" in modules:
193
- print("Initializing a sentiment classification pipeline...")
194
- classification_pipe = pipeline(
195
- "text-classification",
196
- model=classification_model,
197
- top_k=None,
198
- device=device,
199
- torch_dtype=torch_dtype,
200
- )
201
-
202
- if "sd" in modules and not sd_use_remote:
203
- from diffusers import StableDiffusionPipeline
204
- from diffusers import EulerAncestralDiscreteScheduler
205
-
206
- print("Initializing Stable Diffusion pipeline...")
207
- sd_device_string = cuda_device if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
208
- sd_device = torch.device(sd_device_string)
209
- sd_torch_dtype = torch.float32 if sd_device_string != cuda_device else torch.float16
210
- sd_pipe = StableDiffusionPipeline.from_pretrained(
211
- sd_model, custom_pipeline="lpw_stable_diffusion", torch_dtype=sd_torch_dtype
212
- ).to(sd_device)
213
- sd_pipe.safety_checker = lambda images, clip_input: (images, False)
214
- sd_pipe.enable_attention_slicing()
215
- # pipe.scheduler = KarrasVeScheduler.from_config(pipe.scheduler.config)
216
- sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
217
- sd_pipe.scheduler.config
218
- )
219
- elif "sd" in modules and sd_use_remote:
220
- print("Initializing Stable Diffusion connection")
221
- try:
222
- sd_remote = webuiapi.WebUIApi(
223
- host=sd_remote_host, port=sd_remote_port, use_https=sd_remote_ssl
224
- )
225
- if sd_remote_auth:
226
- username, password = sd_remote_auth.split(":")
227
- sd_remote.set_auth(username, password)
228
- sd_remote.util_wait_for_ready()
229
- except Exception as e:
230
- # remote sd from modules
231
- print(
232
- f"{Fore.RED}{Style.BRIGHT}Could not connect to remote SD backend at http{'s' if sd_remote_ssl else ''}://{sd_remote_host}:{sd_remote_port}! Disabling SD module...{Style.RESET_ALL}"
233
- )
234
- modules.remove("sd")
235
-
236
- if "tts" in modules:
237
- print("tts module is deprecated. Please use silero-tts instead.")
238
- modules.remove("tts")
239
- modules.append("silero-tts")
240
-
241
-
242
- if "silero-tts" in modules:
243
- if not os.path.exists(SILERO_SAMPLES_PATH):
244
- os.makedirs(SILERO_SAMPLES_PATH)
245
- print("Initializing Silero TTS server")
246
- from silero_api_server import tts
247
-
248
- tts_service = tts.SileroTtsService(SILERO_SAMPLES_PATH)
249
- if len(os.listdir(SILERO_SAMPLES_PATH)) == 0:
250
- print("Generating Silero TTS samples...")
251
- tts_service.update_sample_text(SILERO_SAMPLE_TEXT)
252
- tts_service.generate_samples()
253
-
254
-
255
- if "edge-tts" in modules:
256
- print("Initializing Edge TTS client")
257
- import tts_edge as edge
258
-
259
-
260
- if "chromadb" in modules:
261
- print("Initializing ChromaDB")
262
- import chromadb
263
- import posthog
264
- from chromadb.config import Settings
265
- from sentence_transformers import SentenceTransformer
266
-
267
- # Assume that the user wants in-memory unless a host is specified
268
- # Also disable chromadb telemetry
269
- posthog.capture = lambda *args, **kwargs: None
270
- if args.chroma_host is None:
271
- if args.chroma_persist:
272
- chromadb_client = chromadb.PersistentClient(path=args.chroma_folder, settings=Settings(anonymized_telemetry=False))
273
- print(f"ChromaDB is running in-memory with persistence. Persistence is stored in {args.chroma_folder}. Can be cleared by deleting the folder or purging db.")
274
- else:
275
- chromadb_client = chromadb.EphemeralClient(Settings(anonymized_telemetry=False))
276
- print(f"ChromaDB is running in-memory without persistence.")
277
- else:
278
- chroma_port=(
279
- args.chroma_port if args.chroma_port else DEFAULT_CHROMA_PORT
280
- )
281
- chromadb_client = chromadb.HttpClient(host=args.chroma_host, port=chroma_port, settings=Settings(anonymized_telemetry=False))
282
- print(f"ChromaDB is remotely configured at {args.chroma_host}:{chroma_port}")
283
-
284
- chromadb_embedder = SentenceTransformer(embedding_model, device=device_string)
285
- chromadb_embed_fn = lambda *args, **kwargs: chromadb_embedder.encode(*args, **kwargs).tolist()
286
-
287
- # Check if the db is connected and running, otherwise tell the user
288
- try:
289
- chromadb_client.heartbeat()
290
- print("Successfully pinged ChromaDB! Your client is successfully connected.")
291
- except:
292
- print("Could not ping ChromaDB! If you are running remotely, please check your host and port!")
293
-
294
- # Flask init
295
- app = Flask(__name__)
296
- CORS(app) # allow cross-domain requests
297
- Compress(app) # compress responses
298
- app.config["MAX_CONTENT_LENGTH"] = 100 * 1024 * 1024
299
-
300
-
301
- def require_module(name):
302
- def wrapper(fn):
303
- @wraps(fn)
304
- def decorated_view(*args, **kwargs):
305
- if name not in modules:
306
- abort(403, "Module is disabled by config")
307
- return fn(*args, **kwargs)
308
-
309
- return decorated_view
310
-
311
- return wrapper
312
-
313
-
314
- # AI stuff
315
- def classify_text(text: str) -> list:
316
- output = classification_pipe(
317
- text,
318
- truncation=True,
319
- max_length=classification_pipe.model.config.max_position_embeddings,
320
- )[0]
321
- return sorted(output, key=lambda x: x["score"], reverse=True)
322
-
323
-
324
- def caption_image(raw_image: Image, max_new_tokens: int = 20) -> str:
325
- inputs = captioning_processor(raw_image.convert("RGB"), return_tensors="pt").to(
326
- device, torch_dtype
327
- )
328
- outputs = captioning_transformer.generate(**inputs, max_new_tokens=max_new_tokens)
329
- caption = captioning_processor.decode(outputs[0], skip_special_tokens=True)
330
- return caption
331
-
332
-
333
- def summarize_chunks(text: str, params: dict) -> str:
334
- try:
335
- return summarize(text, params)
336
- except IndexError:
337
- print(
338
- "Sequence length too large for model, cutting text in half and calling again"
339
- )
340
- new_params = params.copy()
341
- new_params["max_length"] = new_params["max_length"] // 2
342
- new_params["min_length"] = new_params["min_length"] // 2
343
- return summarize_chunks(
344
- text[: (len(text) // 2)], new_params
345
- ) + summarize_chunks(text[(len(text) // 2) :], new_params)
346
-
347
-
348
- def summarize(text: str, params: dict) -> str:
349
- # Tokenize input
350
- inputs = summarization_tokenizer(text, return_tensors="pt").to(device)
351
- token_count = len(inputs[0])
352
-
353
- bad_words_ids = [
354
- summarization_tokenizer(bad_word, add_special_tokens=False).input_ids
355
- for bad_word in params["bad_words"]
356
- ]
357
- summary_ids = summarization_transformer.generate(
358
- inputs["input_ids"],
359
- num_beams=2,
360
- max_new_tokens=max(token_count, int(params["max_length"])),
361
- min_new_tokens=min(token_count, int(params["min_length"])),
362
- repetition_penalty=float(params["repetition_penalty"]),
363
- temperature=float(params["temperature"]),
364
- length_penalty=float(params["length_penalty"]),
365
- bad_words_ids=bad_words_ids,
366
- )
367
- summary = summarization_tokenizer.batch_decode(
368
- summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
369
- )[0]
370
- summary = normalize_string(summary)
371
- return summary
372
-
373
-
374
- def normalize_string(input: str) -> str:
375
- output = " ".join(unicodedata.normalize("NFKC", input).strip().split())
376
- return output
377
-
378
-
379
- def generate_image(data: dict) -> Image:
380
- prompt = normalize_string(f'{data["prompt_prefix"]} {data["prompt"]}')
381
-
382
- if sd_use_remote:
383
- image = sd_remote.txt2img(
384
- prompt=prompt,
385
- negative_prompt=data["negative_prompt"],
386
- sampler_name=data["sampler"],
387
- steps=data["steps"],
388
- cfg_scale=data["scale"],
389
- width=data["width"],
390
- height=data["height"],
391
- restore_faces=data["restore_faces"],
392
- enable_hr=data["enable_hr"],
393
- save_images=True,
394
- send_images=True,
395
- do_not_save_grid=False,
396
- do_not_save_samples=False,
397
- ).image
398
- else:
399
- image = sd_pipe(
400
- prompt=prompt,
401
- negative_prompt=data["negative_prompt"],
402
- num_inference_steps=data["steps"],
403
- guidance_scale=data["scale"],
404
- width=data["width"],
405
- height=data["height"],
406
- ).images[0]
407
-
408
- image.save("./debug.png")
409
- return image
410
-
411
-
412
- def image_to_base64(image: Image, quality: int = 75) -> str:
413
- buffer = BytesIO()
414
- image.convert("RGB")
415
- image.save(buffer, format="JPEG", quality=quality)
416
- img_str = base64.b64encode(buffer.getvalue()).decode("utf-8")
417
- return img_str
418
-
419
-
420
- ignore_auth = []
421
- # [HF, Huggingface] Get password instead of text file.
422
- api_key = os.environ.get("password")
423
-
424
- def is_authorize_ignored(request):
425
- view_func = app.view_functions.get(request.endpoint)
426
-
427
- if view_func is not None:
428
- if view_func in ignore_auth:
429
- return True
430
- return False
431
-
432
- @app.before_request
433
- def before_request():
434
- # Request time measuring
435
- request.start_time = time.time()
436
-
437
- # Checks if an API key is present and valid, otherwise return unauthorized
438
- # The options check is required so CORS doesn't get angry
439
- try:
440
- if request.method != 'OPTIONS' and is_authorize_ignored(request) == False and getattr(request.authorization, 'token', '') != api_key:
441
- print(f"WARNING: Unauthorized API key access from {request.remote_addr}")
442
- if request.method == 'POST':
443
- print(f"Incoming POST request with {request.headers.get('Authorization')}")
444
- response = jsonify({ 'error': '401: Invalid API key' })
445
- response.status_code = 401
446
- return "https://(hf_name)-(space_name).hf.space/"
447
- except Exception as e:
448
- print(f"API key check error: {e}")
449
- return "https://(hf_name)-(space_name).hf.space/"
450
-
451
-
452
- @app.after_request
453
- def after_request(response):
454
- duration = time.time() - request.start_time
455
- response.headers["X-Request-Duration"] = str(duration)
456
- return response
457
-
458
-
459
- @app.route("/", methods=["GET"])
460
- def index():
461
- with open("./README.md", "r", encoding="utf8") as f:
462
- content = f.read()
463
- return render_template_string(markdown.markdown(content, extensions=["tables"]))
464
-
465
-
466
- @app.route("/api/extensions", methods=["GET"])
467
- def get_extensions():
468
- extensions = dict(
469
- {
470
- "extensions": [
471
- {
472
- "name": "not-supported",
473
- "metadata": {
474
- "display_name": """<span style="white-space:break-spaces;">Extensions serving using Extensions API is no longer supported. Please update the mod from: <a href="https://github.com/Cohee1207/SillyTavern">https://github.com/Cohee1207/SillyTavern</a></span>""",
475
- "requires": [],
476
- "assets": [],
477
- },
478
- }
479
- ]
480
- }
481
- )
482
- return jsonify(extensions)
483
-
484
-
485
- @app.route("/api/caption", methods=["POST"])
486
- @require_module("caption")
487
- def api_caption():
488
- data = request.get_json()
489
-
490
- if "image" not in data or not isinstance(data["image"], str):
491
- abort(400, '"image" is required')
492
-
493
- image = Image.open(BytesIO(base64.b64decode(data["image"])))
494
- image = image.convert("RGB")
495
- image.thumbnail((512, 512))
496
- caption = caption_image(image)
497
- thumbnail = image_to_base64(image)
498
- print("Caption:", caption, sep="\n")
499
- gc.collect()
500
- return jsonify({"caption": caption, "thumbnail": thumbnail})
501
-
502
-
503
- @app.route("/api/summarize", methods=["POST"])
504
- @require_module("summarize")
505
- def api_summarize():
506
- data = request.get_json()
507
-
508
- if "text" not in data or not isinstance(data["text"], str):
509
- abort(400, '"text" is required')
510
-
511
- params = DEFAULT_SUMMARIZE_PARAMS.copy()
512
-
513
- if "params" in data and isinstance(data["params"], dict):
514
- params.update(data["params"])
515
-
516
- print("Summary input:", data["text"], sep="\n")
517
- summary = summarize_chunks(data["text"], params)
518
- print("Summary output:", summary, sep="\n")
519
- gc.collect()
520
- return jsonify({"summary": summary})
521
-
522
-
523
- @app.route("/api/classify", methods=["POST"])
524
- @require_module("classify")
525
- def api_classify():
526
- data = request.get_json()
527
-
528
- if "text" not in data or not isinstance(data["text"], str):
529
- abort(400, '"text" is required')
530
-
531
- print("Classification input:", data["text"], sep="\n")
532
- classification = classify_text(data["text"])
533
- print("Classification output:", classification, sep="\n")
534
- gc.collect()
535
- return jsonify({"classification": classification})
536
-
537
-
538
- @app.route("/api/classify/labels", methods=["GET"])
539
- @require_module("classify")
540
- def api_classify_labels():
541
- classification = classify_text("")
542
- labels = [x["label"] for x in classification]
543
- return jsonify({"labels": labels})
544
-
545
-
546
- @app.route("/api/image", methods=["POST"])
547
- @require_module("sd")
548
- def api_image():
549
- required_fields = {
550
- "prompt": str,
551
- }
552
-
553
- optional_fields = {
554
- "steps": 30,
555
- "scale": 6,
556
- "sampler": "DDIM",
557
- "width": 512,
558
- "height": 512,
559
- "restore_faces": False,
560
- "enable_hr": False,
561
- "prompt_prefix": PROMPT_PREFIX,
562
- "negative_prompt": NEGATIVE_PROMPT,
563
- }
564
-
565
- data = request.get_json()
566
-
567
- # Check required fields
568
- for field, field_type in required_fields.items():
569
- if field not in data or not isinstance(data[field], field_type):
570
- abort(400, f'"{field}" is required')
571
-
572
- # Set optional fields to default values if not provided
573
- for field, default_value in optional_fields.items():
574
- type_match = (
575
- (int, float)
576
- if isinstance(default_value, (int, float))
577
- else type(default_value)
578
- )
579
- if field not in data or not isinstance(data[field], type_match):
580
- data[field] = default_value
581
-
582
- try:
583
- print("SD inputs:", data, sep="\n")
584
- image = generate_image(data)
585
- base64image = image_to_base64(image, quality=90)
586
- return jsonify({"image": base64image})
587
- except RuntimeError as e:
588
- abort(400, str(e))
589
-
590
-
591
- @app.route("/api/image/model", methods=["POST"])
592
- @require_module("sd")
593
- def api_image_model_set():
594
- data = request.get_json()
595
-
596
- if not sd_use_remote:
597
- abort(400, "Changing model for local sd is not supported.")
598
- if "model" not in data or not isinstance(data["model"], str):
599
- abort(400, '"model" is required')
600
-
601
- old_model = sd_remote.util_get_current_model()
602
- sd_remote.util_set_model(data["model"], find_closest=False)
603
- # sd_remote.util_set_model(data['model'])
604
- sd_remote.util_wait_for_ready()
605
- new_model = sd_remote.util_get_current_model()
606
-
607
- return jsonify({"previous_model": old_model, "current_model": new_model})
608
-
609
-
610
- @app.route("/api/image/model", methods=["GET"])
611
- @require_module("sd")
612
- def api_image_model_get():
613
- model = sd_model
614
-
615
- if sd_use_remote:
616
- model = sd_remote.util_get_current_model()
617
-
618
- return jsonify({"model": model})
619
-
620
-
621
- @app.route("/api/image/models", methods=["GET"])
622
- @require_module("sd")
623
- def api_image_models():
624
- models = [sd_model]
625
-
626
- if sd_use_remote:
627
- models = sd_remote.util_get_model_names()
628
-
629
- return jsonify({"models": models})
630
-
631
-
632
- @app.route("/api/image/samplers", methods=["GET"])
633
- @require_module("sd")
634
- def api_image_samplers():
635
- samplers = ["Euler a"]
636
-
637
- if sd_use_remote:
638
- samplers = [sampler["name"] for sampler in sd_remote.get_samplers()]
639
-
640
- return jsonify({"samplers": samplers})
641
-
642
-
643
- @app.route("/api/modules", methods=["GET"])
644
- def get_modules():
645
- return jsonify({"modules": modules})
646
-
647
-
648
- @app.route("/api/tts/speakers", methods=["GET"])
649
- @require_module("silero-tts")
650
- def tts_speakers():
651
- voices = [
652
- {
653
- "name": speaker,
654
- "voice_id": speaker,
655
- "preview_url": f"{str(request.url_root)}api/tts/sample/{speaker}",
656
- }
657
- for speaker in tts_service.get_speakers()
658
- ]
659
- return jsonify(voices)
660
-
661
- # Added fix for Silero not working as new files were unable to be created if one already existed. - Rolyat 7/7/23
662
- @app.route("/api/tts/generate", methods=["POST"])
663
- @require_module("silero-tts")
664
- def tts_generate():
665
- voice = request.get_json()
666
- if "text" not in voice or not isinstance(voice["text"], str):
667
- abort(400, '"text" is required')
668
- if "speaker" not in voice or not isinstance(voice["speaker"], str):
669
- abort(400, '"speaker" is required')
670
- # Remove asterisks
671
- voice["text"] = voice["text"].replace("*", "")
672
- try:
673
- # Remove the destination file if it already exists
674
- if os.path.exists('test.wav'):
675
- os.remove('test.wav')
676
-
677
- audio = tts_service.generate(voice["speaker"], voice["text"])
678
- audio_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.basename(audio))
679
-
680
- os.rename(audio, audio_file_path)
681
- return send_file(audio_file_path, mimetype="audio/x-wav")
682
- except Exception as e:
683
- print(e)
684
- abort(500, voice["speaker"])
685
-
686
-
687
- @app.route("/api/tts/sample/<speaker>", methods=["GET"])
688
- @require_module("silero-tts")
689
- def tts_play_sample(speaker: str):
690
- return send_from_directory(SILERO_SAMPLES_PATH, f"{speaker}.wav")
691
-
692
-
693
- @app.route("/api/edge-tts/list", methods=["GET"])
694
- @require_module("edge-tts")
695
- def edge_tts_list():
696
- voices = edge.get_voices()
697
- return jsonify(voices)
698
-
699
-
700
- @app.route("/api/edge-tts/generate", methods=["POST"])
701
- @require_module("edge-tts")
702
- def edge_tts_generate():
703
- data = request.get_json()
704
- if "text" not in data or not isinstance(data["text"], str):
705
- abort(400, '"text" is required')
706
- if "voice" not in data or not isinstance(data["voice"], str):
707
- abort(400, '"voice" is required')
708
- if "rate" in data and isinstance(data['rate'], int):
709
- rate = data['rate']
710
- else:
711
- rate = 0
712
- # Remove asterisks
713
- data["text"] = data["text"].replace("*", "")
714
- try:
715
- audio = edge.generate_audio(text=data["text"], voice=data["voice"], rate=rate)
716
- return Response(audio, mimetype="audio/mpeg")
717
- except Exception as e:
718
- print(e)
719
- abort(500, data["voice"])
720
-
721
-
722
- @app.route("/api/chromadb", methods=["POST"])
723
- @require_module("chromadb")
724
- def chromadb_add_messages():
725
- data = request.get_json()
726
- if "chat_id" not in data or not isinstance(data["chat_id"], str):
727
- abort(400, '"chat_id" is required')
728
- if "messages" not in data or not isinstance(data["messages"], list):
729
- abort(400, '"messages" is required')
730
-
731
- chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest()
732
- collection = chromadb_client.get_or_create_collection(
733
- name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn
734
- )
735
-
736
- documents = [m["content"] for m in data["messages"]]
737
- ids = [m["id"] for m in data["messages"]]
738
- metadatas = [
739
- {"role": m["role"], "date": m["date"], "meta": m.get("meta", "")}
740
- for m in data["messages"]
741
- ]
742
-
743
- collection.upsert(
744
- ids=ids,
745
- documents=documents,
746
- metadatas=metadatas,
747
- )
748
-
749
- return jsonify({"count": len(ids)})
750
-
751
-
752
- @app.route("/api/chromadb/purge", methods=["POST"])
753
- @require_module("chromadb")
754
- def chromadb_purge():
755
- data = request.get_json()
756
- if "chat_id" not in data or not isinstance(data["chat_id"], str):
757
- abort(400, '"chat_id" is required')
758
-
759
- chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest()
760
- collection = chromadb_client.get_or_create_collection(
761
- name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn
762
- )
763
-
764
- count = collection.count()
765
- collection.delete()
766
- print("ChromaDB embeddings deleted", count)
767
- return 'Ok', 200
768
-
769
-
770
- @app.route("/api/chromadb/query", methods=["POST"])
771
- @require_module("chromadb")
772
- def chromadb_query():
773
- data = request.get_json()
774
- if "chat_id" not in data or not isinstance(data["chat_id"], str):
775
- abort(400, '"chat_id" is required')
776
- if "query" not in data or not isinstance(data["query"], str):
777
- abort(400, '"query" is required')
778
-
779
- if "n_results" not in data or not isinstance(data["n_results"], int):
780
- n_results = 1
781
- else:
782
- n_results = data["n_results"]
783
-
784
- chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest()
785
- collection = chromadb_client.get_or_create_collection(
786
- name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn
787
- )
788
-
789
- if collection.count() == 0:
790
- print(f"Queried empty/missing collection for {repr(data['chat_id'])}.")
791
- return jsonify([])
792
-
793
-
794
- n_results = min(collection.count(), n_results)
795
- query_result = collection.query(
796
- query_texts=[data["query"]],
797
- n_results=n_results,
798
- )
799
-
800
- documents = query_result["documents"][0]
801
- ids = query_result["ids"][0]
802
- metadatas = query_result["metadatas"][0]
803
- distances = query_result["distances"][0]
804
-
805
- messages = [
806
- {
807
- "id": ids[i],
808
- "date": metadatas[i]["date"],
809
- "role": metadatas[i]["role"],
810
- "meta": metadatas[i]["meta"],
811
- "content": documents[i],
812
- "distance": distances[i],
813
- }
814
- for i in range(len(ids))
815
- ]
816
-
817
- return jsonify(messages)
818
-
819
- @app.route("/api/chromadb/multiquery", methods=["POST"])
820
- @require_module("chromadb")
821
- def chromadb_multiquery():
822
- data = request.get_json()
823
- if "chat_list" not in data or not isinstance(data["chat_list"], list):
824
- abort(400, '"chat_list" is required and should be a list')
825
- if "query" not in data or not isinstance(data["query"], str):
826
- abort(400, '"query" is required')
827
-
828
- if "n_results" not in data or not isinstance(data["n_results"], int):
829
- n_results = 1
830
- else:
831
- n_results = data["n_results"]
832
-
833
- messages = []
834
-
835
- for chat_id in data["chat_list"]:
836
- if not isinstance(chat_id, str):
837
- continue
838
-
839
- try:
840
- chat_id_md5 = hashlib.md5(chat_id.encode()).hexdigest()
841
- collection = chromadb_client.get_collection(
842
- name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn
843
- )
844
-
845
- # Skip this chat if the collection is empty
846
- if collection.count() == 0:
847
- continue
848
-
849
- n_results_per_chat = min(collection.count(), n_results)
850
- query_result = collection.query(
851
- query_texts=[data["query"]],
852
- n_results=n_results_per_chat,
853
- )
854
- documents = query_result["documents"][0]
855
- ids = query_result["ids"][0]
856
- metadatas = query_result["metadatas"][0]
857
- distances = query_result["distances"][0]
858
-
859
- chat_messages = [
860
- {
861
- "id": ids[i],
862
- "date": metadatas[i]["date"],
863
- "role": metadatas[i]["role"],
864
- "meta": metadatas[i]["meta"],
865
- "content": documents[i],
866
- "distance": distances[i],
867
- }
868
- for i in range(len(ids))
869
- ]
870
-
871
- messages.extend(chat_messages)
872
- except Exception as e:
873
- print(e)
874
-
875
- #remove duplicate msgs, filter down to the right number
876
- seen = set()
877
- messages = [d for d in messages if not (d['content'] in seen or seen.add(d['content']))]
878
- messages = sorted(messages, key=lambda x: x['distance'])[0:n_results]
879
-
880
- return jsonify(messages)
881
-
882
-
883
- @app.route("/api/chromadb/export", methods=["POST"])
884
- @require_module("chromadb")
885
- def chromadb_export():
886
- data = request.get_json()
887
- if "chat_id" not in data or not isinstance(data["chat_id"], str):
888
- abort(400, '"chat_id" is required')
889
-
890
- chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest()
891
- try:
892
- collection = chromadb_client.get_collection(
893
- name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn
894
- )
895
- except Exception as e:
896
- print(e)
897
- abort(400, "Chat collection not found in chromadb")
898
-
899
- collection_content = collection.get()
900
- documents = collection_content.get('documents', [])
901
- ids = collection_content.get('ids', [])
902
- metadatas = collection_content.get('metadatas', [])
903
-
904
- unsorted_content = [
905
- {
906
- "id": ids[i],
907
- "metadata": metadatas[i],
908
- "document": documents[i],
909
- }
910
- for i in range(len(ids))
911
- ]
912
-
913
- sorted_content = sorted(unsorted_content, key=lambda x: x['metadata']['date'])
914
-
915
- export = {
916
- "chat_id": data["chat_id"],
917
- "content": sorted_content
918
- }
919
-
920
- return jsonify(export)
921
-
922
- @app.route("/api/chromadb/import", methods=["POST"])
923
- @require_module("chromadb")
924
- def chromadb_import():
925
- data = request.get_json()
926
- content = data['content']
927
- if "chat_id" not in data or not isinstance(data["chat_id"], str):
928
- abort(400, '"chat_id" is required')
929
-
930
- chat_id_md5 = hashlib.md5(data["chat_id"].encode()).hexdigest()
931
- collection = chromadb_client.get_or_create_collection(
932
- name=f"chat-{chat_id_md5}", embedding_function=chromadb_embed_fn
933
- )
934
-
935
- documents = [item['document'] for item in content]
936
- metadatas = [item['metadata'] for item in content]
937
- ids = [item['id'] for item in content]
938
-
939
-
940
- collection.upsert(documents=documents, metadatas=metadatas, ids=ids)
941
- print(f"Imported {len(ids)} (total {collection.count()}) content entries into {repr(data['chat_id'])}")
942
-
943
- return jsonify({"count": len(ids)})
944
-
945
-
946
- if args.share:
947
- from flask_cloudflared import _run_cloudflared
948
- import inspect
949
-
950
- sig = inspect.signature(_run_cloudflared)
951
- sum = sum(
952
- 1
953
- for param in sig.parameters.values()
954
- if param.kind == param.POSITIONAL_OR_KEYWORD
955
- )
956
- if sum > 1:
957
- metrics_port = randint(8100, 9000)
958
- cloudflare = _run_cloudflared(port, metrics_port)
959
- else:
960
- cloudflare = _run_cloudflared(port)
961
- print("Running on", cloudflare)
962
-
963
- ignore_auth.append(tts_play_sample)
964
- app.run(host=host, port=port)
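For orientation only, a minimal client sketch for the extras API exposed by the deleted `server.py` (hypothetical host and key; assumes the `summarize` module is enabled and that Flask's `request.authorization` bearer-token parsing used above is available):

```python
# Hypothetical client for the removed SillyTavern Extras server; not part of this commit.
import requests

resp = requests.post(
    "http://localhost:7860/api/summarize",          # port 7860 is hard-coded for the HF Space
    json={"text": "A long chat transcript to condense..."},
    headers={"Authorization": "Bearer <api key>"},   # matches the "password" secret checked in before_request
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["summary"])
```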
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/fileio/handlers/__init__.py DELETED
@@ -1,7 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- from .base import BaseFileHandler
3
- from .json_handler import JsonHandler
4
- from .pickle_handler import PickleHandler
5
- from .yaml_handler import YamlHandler
6
-
7
- __all__ = ['BaseFileHandler', 'JsonHandler', 'PickleHandler', 'YamlHandler']
 
spaces/Arnaudding001/OpenAI_whisperLive/app-shared.py DELETED
@@ -1,3 +0,0 @@
1
- # Run the app with no audio file restrictions
2
- from app import create_ui
3
- create_ui(-1, share=True)
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/models/wheel.py DELETED
@@ -1,92 +0,0 @@
1
- """Represents a wheel file and provides access to the various parts of the
2
- name that have meaning.
3
- """
4
- import re
5
- from typing import Dict, Iterable, List
6
-
7
- from pip._vendor.packaging.tags import Tag
8
-
9
- from pip._internal.exceptions import InvalidWheelFilename
10
-
11
-
12
- class Wheel:
13
- """A wheel file"""
14
-
15
- wheel_file_re = re.compile(
16
- r"""^(?P<namever>(?P<name>[^\s-]+?)-(?P<ver>[^\s-]*?))
17
- ((-(?P<build>\d[^-]*?))?-(?P<pyver>[^\s-]+?)-(?P<abi>[^\s-]+?)-(?P<plat>[^\s-]+?)
18
- \.whl|\.dist-info)$""",
19
- re.VERBOSE,
20
- )
21
-
22
- def __init__(self, filename: str) -> None:
23
- """
24
- :raises InvalidWheelFilename: when the filename is invalid for a wheel
25
- """
26
- wheel_info = self.wheel_file_re.match(filename)
27
- if not wheel_info:
28
- raise InvalidWheelFilename(f"{filename} is not a valid wheel filename.")
29
- self.filename = filename
30
- self.name = wheel_info.group("name").replace("_", "-")
31
- # we'll assume "_" means "-" due to wheel naming scheme
32
- # (https://github.com/pypa/pip/issues/1150)
33
- self.version = wheel_info.group("ver").replace("_", "-")
34
- self.build_tag = wheel_info.group("build")
35
- self.pyversions = wheel_info.group("pyver").split(".")
36
- self.abis = wheel_info.group("abi").split(".")
37
- self.plats = wheel_info.group("plat").split(".")
38
-
39
- # All the tag combinations from this file
40
- self.file_tags = {
41
- Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats
42
- }
43
-
44
- def get_formatted_file_tags(self) -> List[str]:
45
- """Return the wheel's tags as a sorted list of strings."""
46
- return sorted(str(tag) for tag in self.file_tags)
47
-
48
- def support_index_min(self, tags: List[Tag]) -> int:
49
- """Return the lowest index that one of the wheel's file_tag combinations
50
- achieves in the given list of supported tags.
51
-
52
- For example, if there are 8 supported tags and one of the file tags
53
- is first in the list, then return 0.
54
-
55
- :param tags: the PEP 425 tags to check the wheel against, in order
56
- with most preferred first.
57
-
58
- :raises ValueError: If none of the wheel's file tags match one of
59
- the supported tags.
60
- """
61
- try:
62
- return next(i for i, t in enumerate(tags) if t in self.file_tags)
63
- except StopIteration:
64
- raise ValueError()
65
-
66
- def find_most_preferred_tag(
67
- self, tags: List[Tag], tag_to_priority: Dict[Tag, int]
68
- ) -> int:
69
- """Return the priority of the most preferred tag that one of the wheel's file
70
- tag combinations achieves in the given list of supported tags using the given
71
- tag_to_priority mapping, where lower priorities are more-preferred.
72
-
73
- This is used in place of support_index_min in some cases in order to avoid
74
- an expensive linear scan of a large list of tags.
75
-
76
- :param tags: the PEP 425 tags to check the wheel against.
77
- :param tag_to_priority: a mapping from tag to priority of that tag, where
78
- lower is more preferred.
79
-
80
- :raises ValueError: If none of the wheel's file tags match one of
81
- the supported tags.
82
- """
83
- return min(
84
- tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority
85
- )
86
-
87
- def supported(self, tags: Iterable[Tag]) -> bool:
88
- """Return whether the wheel is compatible with one of the given tags.
89
-
90
- :param tags: the PEP 425 tags to check the wheel against.
91
- """
92
- return not self.file_tags.isdisjoint(tags)
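As a side note, a small hedged sketch of how the `Wheel` model removed above parses a filename (pip-internal API, shown only to illustrate the deleted code's behaviour):

```python
# Illustrative only; pip's internal modules carry no stability guarantee.
from pip._internal.models.wheel import Wheel
from pip._vendor.packaging.tags import Tag

w = Wheel("pip-23.1.2-py3-none-any.whl")
print(w.name, w.version)                         # pip 23.1.2
print(w.get_formatted_file_tags())               # ['py3-none-any']
print(w.supported([Tag("py3", "none", "any")]))  # True
```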
 
spaces/AyakuraMei/Real-CUGAN/app.py DELETED
@@ -1,62 +0,0 @@
1
- from upcunet_v3 import RealWaifuUpScaler
2
- import gradio as gr
3
- import time
4
- import logging
5
- import os
6
- from PIL import ImageOps
7
- import numpy as np
8
- import math
9
-
10
-
11
- def greet(input_img, input_model_name, input_tile_mode):
12
- # if input_img.size[0] * input_img.size[1] > 256 * 256:
13
- # y = int(math.sqrt(256*256/input_img.size[0]*input_img.size[1]))
14
- # x = int(input_img.size[0]/input_img.size[1]*y)
15
- # input_img = ImageOps.fit(input_img, (x, y))
16
- input_img = np.array(input_img)
17
- if input_model_name not in model_cache:
18
- t1 = time.time()
19
- upscaler = RealWaifuUpScaler(input_model_name[2], ModelPath + input_model_name, half=False, device="cpu")
20
- t2 = time.time()
21
- logger.info(f'load model time, {t2 - t1}')
22
- model_cache[input_model_name] = upscaler
23
- else:
24
- upscaler = model_cache[input_model_name]
25
- logger.info(f'load model from cache')
26
-
27
- start = time.time()
28
- result = upscaler(input_img, tile_mode=input_tile_mode)
29
- end = time.time()
30
- logger.info(f'input_model_name, {input_model_name}')
31
- logger.info(f'input_tile_mode, {input_tile_mode}')
32
- logger.info(f'input shape, {input_img.shape}')
33
- logger.info(f'output shape, {result.shape}')
34
- logger.info(f'speed time, {end - start}')
35
- return result
36
-
37
-
38
- if __name__ == '__main__':
39
- logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(process)d] [%(levelname)s] %(message)s")
40
- logger = logging.getLogger()
41
-
42
- ModelPath = "weights_v3/"
43
- model_cache = {}
44
-
45
- input_model_name = gr.inputs.Dropdown(os.listdir(ModelPath), default="up2x-latest-denoise2x.pth", label='选择model')
46
- input_tile_mode = gr.inputs.Dropdown([0, 1, 2, 3, 4], default=2, label='选择tile_mode')
47
- input_img = gr.inputs.Image(label='image', type='pil')
48
-
49
- inputs = [input_img, input_model_name, input_tile_mode]
50
- outputs = "image"
51
- iface = gr.Interface(fn=greet,
52
- inputs=inputs,
53
- outputs=outputs,
54
- allow_screenshot=False,
55
- allow_flagging='never',
56
- examples=[['test-img.jpg', "up2x-latest-denoise2x.pth", 2]],
57
- article='[https://github.com/bilibili/ailab/tree/main/Real-CUGAN](https://github.com/bilibili/ailab/tree/main/Real-CUGAN)<br>'
58
- '感谢b站开源的项目,图片过大会导致内存不足,所有我将图片裁剪小,想体验大图片的效果请自行前往上面的链接。<br>'
59
- '修改bbb'
60
- 'The large image will lead to memory limit exceeded. So I crop and resize image. '
61
- 'If you want to experience the large image, please go to the link above.')
62
- iface.launch()
 
spaces/AzinZ/vitscn/text/mandarin.py DELETED
@@ -1,48 +0,0 @@
1
- import re
2
- import cn2an
3
- from pypinyin import lazy_pinyin, Style
4
- import jieba
5
- import zhon
6
- from text.symbols import symbols
7
-
8
- _puntuation_map = [(re.compile('%s' % x[0]), x[1]) for x in [
9
- ('。', '.'),
10
- ('。', '.'),
11
- ('.', ''),
12
- ('?', '?'),
13
- ('!', '!'),
14
- (',', ','),
15
- ]]
16
-
17
-
18
- def number_to_chinese(text):
19
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
20
- for number in numbers:
21
- text = text.replace(number, cn2an.an2cn(number), 1)
22
- return text
23
-
24
- def remove_non_stop_punctuation(text):
25
- text = re.sub('[%s]' % zhon.hanzi.non_stops, '', text)
26
- return text
27
-
28
- def map_stop_puntuation(text):
29
- for regex, replacement in _puntuation_map:
30
- text = re.sub(regex, replacement, text)
31
- return text
32
-
33
- def chinese_to_pinyin(text):
34
- text = map_stop_puntuation(text)
35
- text = number_to_chinese(text)
36
- text = remove_non_stop_punctuation(text)
37
- words = jieba.lcut(text, cut_all=False)
38
- text = ''
39
- for word in words:
40
- if not re.search('[\u4e00-\u9fff]', word):
41
- if word in ''.join(symbols):
42
- text += word
43
- continue
44
- pinyin = lazy_pinyin(word, Style.TONE3)
45
- if text != '':
46
- text += ' '
47
- text += ''.join(pinyin)
48
- return text
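For context, a hedged usage sketch of the removed `chinese_to_pinyin` helper (module path assumed from this Space's layout; the exact output depends on jieba's segmentation and the pypinyin dictionary):

```python
# Illustrative only; assumes the deleted text/mandarin.py module and its dependencies
# (cn2an, pypinyin, jieba, zhon) are installed and importable as text.mandarin.
from text.mandarin import chinese_to_pinyin

# Numbers are first spelled out with cn2an, non-stop punctuation is stripped,
# and each jieba-segmented word is rendered as tone3 pinyin,
# e.g. "你好,世界。" -> something like "ni3hao3, shi4jie4."
print(chinese_to_pinyin("你好,世界。"))
```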
 
spaces/AzinZ/vitscn/text/symbols.py DELETED
@@ -1,21 +0,0 @@
1
- """ from https://github.com/keithito/tacotron """
2
-
3
- '''
4
- Defines the set of symbols used in text input to the model.
5
- '''
6
- # _pad = '_'
7
- # _punctuation = ';:,.!?¡¿—…"«»“” '
8
- # _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
9
- # _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
10
-
11
- # For Chinese
12
- _pad = '_'
13
- _punctuation = '~;:,.!?¡¿—…"«»“” '
14
- _letters = 'abcdefghijklmnopqrstuvwxyz1234'
15
- _letters_ipa = ""
16
-
17
- # Export all symbols:
18
- symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa)
19
-
20
- # Special symbol ids
21
- SPACE_ID = symbols.index(" ")
 
spaces/Bart92/RVC_HF/lib/infer_pack/models.py DELETED
@@ -1,1144 +0,0 @@
1
- import math, pdb, os
2
- from time import time as ttime
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
- from lib.infer_pack import modules
7
- from lib.infer_pack import attentions
8
- from lib.infer_pack import commons
9
- from lib.infer_pack.commons import init_weights, get_padding
10
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
11
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
12
- from lib.infer_pack.commons import init_weights
13
- import numpy as np
14
- from lib.infer_pack import commons
15
-
16
-
17
- class TextEncoder256(nn.Module):
18
- def __init__(
19
- self,
20
- out_channels,
21
- hidden_channels,
22
- filter_channels,
23
- n_heads,
24
- n_layers,
25
- kernel_size,
26
- p_dropout,
27
- f0=True,
28
- ):
29
- super().__init__()
30
- self.out_channels = out_channels
31
- self.hidden_channels = hidden_channels
32
- self.filter_channels = filter_channels
33
- self.n_heads = n_heads
34
- self.n_layers = n_layers
35
- self.kernel_size = kernel_size
36
- self.p_dropout = p_dropout
37
- self.emb_phone = nn.Linear(256, hidden_channels)
38
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
39
- if f0 == True:
40
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
41
- self.encoder = attentions.Encoder(
42
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
43
- )
44
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
45
-
46
- def forward(self, phone, pitch, lengths):
47
- if pitch == None:
48
- x = self.emb_phone(phone)
49
- else:
50
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
51
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
52
- x = self.lrelu(x)
53
- x = torch.transpose(x, 1, -1) # [b, h, t]
54
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
55
- x.dtype
56
- )
57
- x = self.encoder(x * x_mask, x_mask)
58
- stats = self.proj(x) * x_mask
59
-
60
- m, logs = torch.split(stats, self.out_channels, dim=1)
61
- return m, logs, x_mask
62
-
63
-
64
- class TextEncoder768(nn.Module):
65
- def __init__(
66
- self,
67
- out_channels,
68
- hidden_channels,
69
- filter_channels,
70
- n_heads,
71
- n_layers,
72
- kernel_size,
73
- p_dropout,
74
- f0=True,
75
- ):
76
- super().__init__()
77
- self.out_channels = out_channels
78
- self.hidden_channels = hidden_channels
79
- self.filter_channels = filter_channels
80
- self.n_heads = n_heads
81
- self.n_layers = n_layers
82
- self.kernel_size = kernel_size
83
- self.p_dropout = p_dropout
84
- self.emb_phone = nn.Linear(768, hidden_channels)
85
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
86
- if f0 == True:
87
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
88
- self.encoder = attentions.Encoder(
89
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
90
- )
91
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
92
-
93
- def forward(self, phone, pitch, lengths):
94
- if pitch == None:
95
- x = self.emb_phone(phone)
96
- else:
97
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
98
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
99
- x = self.lrelu(x)
100
- x = torch.transpose(x, 1, -1) # [b, h, t]
101
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
102
- x.dtype
103
- )
104
- x = self.encoder(x * x_mask, x_mask)
105
- stats = self.proj(x) * x_mask
106
-
107
- m, logs = torch.split(stats, self.out_channels, dim=1)
108
- return m, logs, x_mask
109
-
110
-
111
- class ResidualCouplingBlock(nn.Module):
112
- def __init__(
113
- self,
114
- channels,
115
- hidden_channels,
116
- kernel_size,
117
- dilation_rate,
118
- n_layers,
119
- n_flows=4,
120
- gin_channels=0,
121
- ):
122
- super().__init__()
123
- self.channels = channels
124
- self.hidden_channels = hidden_channels
125
- self.kernel_size = kernel_size
126
- self.dilation_rate = dilation_rate
127
- self.n_layers = n_layers
128
- self.n_flows = n_flows
129
- self.gin_channels = gin_channels
130
-
131
- self.flows = nn.ModuleList()
132
- for i in range(n_flows):
133
- self.flows.append(
134
- modules.ResidualCouplingLayer(
135
- channels,
136
- hidden_channels,
137
- kernel_size,
138
- dilation_rate,
139
- n_layers,
140
- gin_channels=gin_channels,
141
- mean_only=True,
142
- )
143
- )
144
- self.flows.append(modules.Flip())
145
-
146
- def forward(self, x, x_mask, g=None, reverse=False):
147
- if not reverse:
148
- for flow in self.flows:
149
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
150
- else:
151
- for flow in reversed(self.flows):
152
- x = flow(x, x_mask, g=g, reverse=reverse)
153
- return x
154
-
155
- def remove_weight_norm(self):
156
- for i in range(self.n_flows):
157
- self.flows[i * 2].remove_weight_norm()
158
-
159
-
160
- class PosteriorEncoder(nn.Module):
161
- def __init__(
162
- self,
163
- in_channels,
164
- out_channels,
165
- hidden_channels,
166
- kernel_size,
167
- dilation_rate,
168
- n_layers,
169
- gin_channels=0,
170
- ):
171
- super().__init__()
172
- self.in_channels = in_channels
173
- self.out_channels = out_channels
174
- self.hidden_channels = hidden_channels
175
- self.kernel_size = kernel_size
176
- self.dilation_rate = dilation_rate
177
- self.n_layers = n_layers
178
- self.gin_channels = gin_channels
179
-
180
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
181
- self.enc = modules.WN(
182
- hidden_channels,
183
- kernel_size,
184
- dilation_rate,
185
- n_layers,
186
- gin_channels=gin_channels,
187
- )
188
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
189
-
190
- def forward(self, x, x_lengths, g=None):
191
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
192
- x.dtype
193
- )
194
- x = self.pre(x) * x_mask
195
- x = self.enc(x, x_mask, g=g)
196
- stats = self.proj(x) * x_mask
197
- m, logs = torch.split(stats, self.out_channels, dim=1)
198
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
199
- return z, m, logs, x_mask
200
-
201
- def remove_weight_norm(self):
202
- self.enc.remove_weight_norm()
203
-
204
-
205
- class Generator(torch.nn.Module):
206
- def __init__(
207
- self,
208
- initial_channel,
209
- resblock,
210
- resblock_kernel_sizes,
211
- resblock_dilation_sizes,
212
- upsample_rates,
213
- upsample_initial_channel,
214
- upsample_kernel_sizes,
215
- gin_channels=0,
216
- ):
217
- super(Generator, self).__init__()
218
- self.num_kernels = len(resblock_kernel_sizes)
219
- self.num_upsamples = len(upsample_rates)
220
- self.conv_pre = Conv1d(
221
- initial_channel, upsample_initial_channel, 7, 1, padding=3
222
- )
223
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
224
-
225
- self.ups = nn.ModuleList()
226
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
227
- self.ups.append(
228
- weight_norm(
229
- ConvTranspose1d(
230
- upsample_initial_channel // (2**i),
231
- upsample_initial_channel // (2 ** (i + 1)),
232
- k,
233
- u,
234
- padding=(k - u) // 2,
235
- )
236
- )
237
- )
238
-
239
- self.resblocks = nn.ModuleList()
240
- for i in range(len(self.ups)):
241
- ch = upsample_initial_channel // (2 ** (i + 1))
242
- for j, (k, d) in enumerate(
243
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
244
- ):
245
- self.resblocks.append(resblock(ch, k, d))
246
-
247
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
248
- self.ups.apply(init_weights)
249
-
250
- if gin_channels != 0:
251
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
252
-
253
- def forward(self, x, g=None):
254
- x = self.conv_pre(x)
255
- if g is not None:
256
- x = x + self.cond(g)
257
-
258
- for i in range(self.num_upsamples):
259
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
260
- x = self.ups[i](x)
261
- xs = None
262
- for j in range(self.num_kernels):
263
- if xs is None:
264
- xs = self.resblocks[i * self.num_kernels + j](x)
265
- else:
266
- xs += self.resblocks[i * self.num_kernels + j](x)
267
- x = xs / self.num_kernels
268
- x = F.leaky_relu(x)
269
- x = self.conv_post(x)
270
- x = torch.tanh(x)
271
-
272
- return x
273
-
274
- def remove_weight_norm(self):
275
- for l in self.ups:
276
- remove_weight_norm(l)
277
- for l in self.resblocks:
278
- l.remove_weight_norm()
279
-
280
-
281
- class SineGen(torch.nn.Module):
282
- """Definition of sine generator
283
- SineGen(samp_rate, harmonic_num = 0,
284
- sine_amp = 0.1, noise_std = 0.003,
285
- voiced_threshold = 0,
286
- flag_for_pulse=False)
287
- samp_rate: sampling rate in Hz
288
- harmonic_num: number of harmonic overtones (default 0)
289
- sine_amp: amplitude of sine-wavefrom (default 0.1)
290
- noise_std: std of Gaussian noise (default 0.003)
291
- voiced_thoreshold: F0 threshold for U/V classification (default 0)
292
- flag_for_pulse: this SinGen is used inside PulseGen (default False)
293
- Note: when flag_for_pulse is True, the first time step of a voiced
294
- segment is always sin(np.pi) or cos(0)
295
- """
296
-
297
- def __init__(
298
- self,
299
- samp_rate,
300
- harmonic_num=0,
301
- sine_amp=0.1,
302
- noise_std=0.003,
303
- voiced_threshold=0,
304
- flag_for_pulse=False,
305
- ):
306
- super(SineGen, self).__init__()
307
- self.sine_amp = sine_amp
308
- self.noise_std = noise_std
309
- self.harmonic_num = harmonic_num
310
- self.dim = self.harmonic_num + 1
311
- self.sampling_rate = samp_rate
312
- self.voiced_threshold = voiced_threshold
313
-
314
- def _f02uv(self, f0):
315
- # generate uv signal
316
- uv = torch.ones_like(f0)
317
- uv = uv * (f0 > self.voiced_threshold)
318
- if uv.device.type == "privateuseone": # for DirectML
319
- uv = uv.float()
320
- return uv
321
-
322
- def forward(self, f0, upp):
323
- """sine_tensor, uv = forward(f0)
324
- input F0: tensor(batchsize=1, length, dim=1)
325
- f0 for unvoiced steps should be 0
326
- output sine_tensor: tensor(batchsize=1, length, dim)
327
- output uv: tensor(batchsize=1, length, 1)
328
- """
329
- with torch.no_grad():
330
- f0 = f0[:, None].transpose(1, 2)
331
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
332
- # fundamental component
333
- f0_buf[:, :, 0] = f0[:, :, 0]
334
- for idx in np.arange(self.harmonic_num):
335
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
336
- idx + 2
337
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
338
- rad_values = (f0_buf / self.sampling_rate) % 1 ### the %1 here means the n_har products cannot be optimized away in post-processing
339
- rand_ini = torch.rand(
340
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
341
- )
342
- rand_ini[:, 0] = 0
343
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
344
- tmp_over_one = torch.cumsum(rad_values, 1) # % 1 ##### applying %1 here would prevent the later cumsum from being optimized
345
- tmp_over_one *= upp
346
- tmp_over_one = F.interpolate(
347
- tmp_over_one.transpose(2, 1),
348
- scale_factor=upp,
349
- mode="linear",
350
- align_corners=True,
351
- ).transpose(2, 1)
352
- rad_values = F.interpolate(
353
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
354
- ).transpose(
355
- 2, 1
356
- ) #######
357
- tmp_over_one %= 1
358
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
359
- cumsum_shift = torch.zeros_like(rad_values)
360
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
361
- sine_waves = torch.sin(
362
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
363
- )
364
- sine_waves = sine_waves * self.sine_amp
365
- uv = self._f02uv(f0)
366
- uv = F.interpolate(
367
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
368
- ).transpose(2, 1)
369
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
370
- noise = noise_amp * torch.randn_like(sine_waves)
371
- sine_waves = sine_waves * uv + noise
372
- return sine_waves, uv, noise
373
-
374
-
375
- class SourceModuleHnNSF(torch.nn.Module):
376
- """SourceModule for hn-nsf
377
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
378
- add_noise_std=0.003, voiced_threshod=0)
379
- sampling_rate: sampling_rate in Hz
380
- harmonic_num: number of harmonics above F0 (default: 0)
381
- sine_amp: amplitude of sine source signal (default: 0.1)
382
- add_noise_std: std of additive Gaussian noise (default: 0.003)
383
- note that amplitude of noise in unvoiced is decided
384
- by sine_amp
385
- voiced_threshold: threshold to set U/V given F0 (default: 0)
386
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
387
- F0_sampled (batchsize, length, 1)
388
- Sine_source (batchsize, length, 1)
389
- noise_source (batchsize, length, 1)
390
- uv (batchsize, length, 1)
391
- """
392
-
393
- def __init__(
394
- self,
395
- sampling_rate,
396
- harmonic_num=0,
397
- sine_amp=0.1,
398
- add_noise_std=0.003,
399
- voiced_threshod=0,
400
- is_half=True,
401
- ):
402
- super(SourceModuleHnNSF, self).__init__()
403
-
404
- self.sine_amp = sine_amp
405
- self.noise_std = add_noise_std
406
- self.is_half = is_half
407
- # to produce sine waveforms
408
- self.l_sin_gen = SineGen(
409
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
410
- )
411
-
412
- # to merge source harmonics into a single excitation
413
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
414
- self.l_tanh = torch.nn.Tanh()
415
-
416
- def forward(self, x, upp=None):
417
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
418
- if self.is_half:
419
- sine_wavs = sine_wavs.half()
420
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
421
- return sine_merge, None, None # noise, uv
422
-
423
-
424
- class GeneratorNSF(torch.nn.Module):
425
- def __init__(
426
- self,
427
- initial_channel,
428
- resblock,
429
- resblock_kernel_sizes,
430
- resblock_dilation_sizes,
431
- upsample_rates,
432
- upsample_initial_channel,
433
- upsample_kernel_sizes,
434
- gin_channels,
435
- sr,
436
- is_half=False,
437
- ):
438
- super(GeneratorNSF, self).__init__()
439
- self.num_kernels = len(resblock_kernel_sizes)
440
- self.num_upsamples = len(upsample_rates)
441
-
442
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
443
- self.m_source = SourceModuleHnNSF(
444
- sampling_rate=sr, harmonic_num=0, is_half=is_half
445
- )
446
- self.noise_convs = nn.ModuleList()
447
- self.conv_pre = Conv1d(
448
- initial_channel, upsample_initial_channel, 7, 1, padding=3
449
- )
450
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
451
-
452
- self.ups = nn.ModuleList()
453
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
454
- c_cur = upsample_initial_channel // (2 ** (i + 1))
455
- self.ups.append(
456
- weight_norm(
457
- ConvTranspose1d(
458
- upsample_initial_channel // (2**i),
459
- upsample_initial_channel // (2 ** (i + 1)),
460
- k,
461
- u,
462
- padding=(k - u) // 2,
463
- )
464
- )
465
- )
466
- if i + 1 < len(upsample_rates):
467
- stride_f0 = np.prod(upsample_rates[i + 1 :])
468
- self.noise_convs.append(
469
- Conv1d(
470
- 1,
471
- c_cur,
472
- kernel_size=stride_f0 * 2,
473
- stride=stride_f0,
474
- padding=stride_f0 // 2,
475
- )
476
- )
477
- else:
478
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
479
-
480
- self.resblocks = nn.ModuleList()
481
- for i in range(len(self.ups)):
482
- ch = upsample_initial_channel // (2 ** (i + 1))
483
- for j, (k, d) in enumerate(
484
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
485
- ):
486
- self.resblocks.append(resblock(ch, k, d))
487
-
488
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
489
- self.ups.apply(init_weights)
490
-
491
- if gin_channels != 0:
492
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
493
-
494
- self.upp = np.prod(upsample_rates)
495
-
496
- def forward(self, x, f0, g=None):
497
- har_source, noi_source, uv = self.m_source(f0, self.upp)
498
- har_source = har_source.transpose(1, 2)
499
- x = self.conv_pre(x)
500
- if g is not None:
501
- x = x + self.cond(g)
502
-
503
- for i in range(self.num_upsamples):
504
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
505
- x = self.ups[i](x)
506
- x_source = self.noise_convs[i](har_source)
507
- x = x + x_source
508
- xs = None
509
- for j in range(self.num_kernels):
510
- if xs is None:
511
- xs = self.resblocks[i * self.num_kernels + j](x)
512
- else:
513
- xs += self.resblocks[i * self.num_kernels + j](x)
514
- x = xs / self.num_kernels
515
- x = F.leaky_relu(x)
516
- x = self.conv_post(x)
517
- x = torch.tanh(x)
518
- return x
519
-
520
- def remove_weight_norm(self):
521
- for l in self.ups:
522
- remove_weight_norm(l)
523
- for l in self.resblocks:
524
- l.remove_weight_norm()
525
-
526
-
527
- sr2sr = {
528
- "32k": 32000,
529
- "40k": 40000,
530
- "48k": 48000,
531
- }
532
-
533
-
534
- class SynthesizerTrnMs256NSFsid(nn.Module):
535
- def __init__(
536
- self,
537
- spec_channels,
538
- segment_size,
539
- inter_channels,
540
- hidden_channels,
541
- filter_channels,
542
- n_heads,
543
- n_layers,
544
- kernel_size,
545
- p_dropout,
546
- resblock,
547
- resblock_kernel_sizes,
548
- resblock_dilation_sizes,
549
- upsample_rates,
550
- upsample_initial_channel,
551
- upsample_kernel_sizes,
552
- spk_embed_dim,
553
- gin_channels,
554
- sr,
555
- **kwargs
556
- ):
557
- super().__init__()
558
- if isinstance(sr, str):
559
- sr = sr2sr[sr]
560
- self.spec_channels = spec_channels
561
- self.inter_channels = inter_channels
562
- self.hidden_channels = hidden_channels
563
- self.filter_channels = filter_channels
564
- self.n_heads = n_heads
565
- self.n_layers = n_layers
566
- self.kernel_size = kernel_size
567
- self.p_dropout = p_dropout
568
- self.resblock = resblock
569
- self.resblock_kernel_sizes = resblock_kernel_sizes
570
- self.resblock_dilation_sizes = resblock_dilation_sizes
571
- self.upsample_rates = upsample_rates
572
- self.upsample_initial_channel = upsample_initial_channel
573
- self.upsample_kernel_sizes = upsample_kernel_sizes
574
- self.segment_size = segment_size
575
- self.gin_channels = gin_channels
576
- # self.hop_length = hop_length#
577
- self.spk_embed_dim = spk_embed_dim
578
- self.enc_p = TextEncoder256(
579
- inter_channels,
580
- hidden_channels,
581
- filter_channels,
582
- n_heads,
583
- n_layers,
584
- kernel_size,
585
- p_dropout,
586
- )
587
- self.dec = GeneratorNSF(
588
- inter_channels,
589
- resblock,
590
- resblock_kernel_sizes,
591
- resblock_dilation_sizes,
592
- upsample_rates,
593
- upsample_initial_channel,
594
- upsample_kernel_sizes,
595
- gin_channels=gin_channels,
596
- sr=sr,
597
- is_half=kwargs["is_half"],
598
- )
599
- self.enc_q = PosteriorEncoder(
600
- spec_channels,
601
- inter_channels,
602
- hidden_channels,
603
- 5,
604
- 1,
605
- 16,
606
- gin_channels=gin_channels,
607
- )
608
- self.flow = ResidualCouplingBlock(
609
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
610
- )
611
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
612
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
613
-
614
- def remove_weight_norm(self):
615
- self.dec.remove_weight_norm()
616
- self.flow.remove_weight_norm()
617
- self.enc_q.remove_weight_norm()
618
-
619
- def forward(
620
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
621
- ):  # ds is the speaker id, shape [bs, 1]
622
- # print(1,pitch.shape)#[bs,t]
623
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
624
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
625
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
626
- z_p = self.flow(z, y_mask, g=g)
627
- z_slice, ids_slice = commons.rand_slice_segments(
628
- z, y_lengths, self.segment_size
629
- )
630
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
631
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
632
- # print(-2,pitchf.shape,z_slice.shape)
633
- o = self.dec(z_slice, pitchf, g=g)
634
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
635
-
636
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
637
- g = self.emb_g(sid).unsqueeze(-1)
638
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
639
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
640
- if rate:
641
- head = int(z_p.shape[2] * rate)
642
- z_p = z_p[:, :, -head:]
643
- x_mask = x_mask[:, :, -head:]
644
- nsff0 = nsff0[:, -head:]
645
- z = self.flow(z_p, x_mask, g=g, reverse=True)
646
- o = self.dec(z * x_mask, nsff0, g=g)
647
- return o, x_mask, (z, z_p, m_p, logs_p)
648
-
649
-
650
- class SynthesizerTrnMs768NSFsid(nn.Module):
651
- def __init__(
652
- self,
653
- spec_channels,
654
- segment_size,
655
- inter_channels,
656
- hidden_channels,
657
- filter_channels,
658
- n_heads,
659
- n_layers,
660
- kernel_size,
661
- p_dropout,
662
- resblock,
663
- resblock_kernel_sizes,
664
- resblock_dilation_sizes,
665
- upsample_rates,
666
- upsample_initial_channel,
667
- upsample_kernel_sizes,
668
- spk_embed_dim,
669
- gin_channels,
670
- sr,
671
- **kwargs
672
- ):
673
- super().__init__()
674
- if isinstance(sr, str):
675
- sr = sr2sr[sr]
676
- self.spec_channels = spec_channels
677
- self.inter_channels = inter_channels
678
- self.hidden_channels = hidden_channels
679
- self.filter_channels = filter_channels
680
- self.n_heads = n_heads
681
- self.n_layers = n_layers
682
- self.kernel_size = kernel_size
683
- self.p_dropout = p_dropout
684
- self.resblock = resblock
685
- self.resblock_kernel_sizes = resblock_kernel_sizes
686
- self.resblock_dilation_sizes = resblock_dilation_sizes
687
- self.upsample_rates = upsample_rates
688
- self.upsample_initial_channel = upsample_initial_channel
689
- self.upsample_kernel_sizes = upsample_kernel_sizes
690
- self.segment_size = segment_size
691
- self.gin_channels = gin_channels
692
- # self.hop_length = hop_length#
693
- self.spk_embed_dim = spk_embed_dim
694
- self.enc_p = TextEncoder768(
695
- inter_channels,
696
- hidden_channels,
697
- filter_channels,
698
- n_heads,
699
- n_layers,
700
- kernel_size,
701
- p_dropout,
702
- )
703
- self.dec = GeneratorNSF(
704
- inter_channels,
705
- resblock,
706
- resblock_kernel_sizes,
707
- resblock_dilation_sizes,
708
- upsample_rates,
709
- upsample_initial_channel,
710
- upsample_kernel_sizes,
711
- gin_channels=gin_channels,
712
- sr=sr,
713
- is_half=kwargs["is_half"],
714
- )
715
- self.enc_q = PosteriorEncoder(
716
- spec_channels,
717
- inter_channels,
718
- hidden_channels,
719
- 5,
720
- 1,
721
- 16,
722
- gin_channels=gin_channels,
723
- )
724
- self.flow = ResidualCouplingBlock(
725
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
726
- )
727
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
728
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
729
-
730
- def remove_weight_norm(self):
731
- self.dec.remove_weight_norm()
732
- self.flow.remove_weight_norm()
733
- self.enc_q.remove_weight_norm()
734
-
735
- def forward(
736
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
737
- ):  # ds is the speaker id, shape [bs, 1]
738
- # print(1,pitch.shape)#[bs,t]
739
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
740
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
741
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
742
- z_p = self.flow(z, y_mask, g=g)
743
- z_slice, ids_slice = commons.rand_slice_segments(
744
- z, y_lengths, self.segment_size
745
- )
746
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
747
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
748
- # print(-2,pitchf.shape,z_slice.shape)
749
- o = self.dec(z_slice, pitchf, g=g)
750
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
751
-
752
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):
753
- g = self.emb_g(sid).unsqueeze(-1)
754
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
755
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
756
- if rate:
757
- head = int(z_p.shape[2] * rate)
758
- z_p = z_p[:, :, -head:]
759
- x_mask = x_mask[:, :, -head:]
760
- nsff0 = nsff0[:, -head:]
761
- z = self.flow(z_p, x_mask, g=g, reverse=True)
762
- o = self.dec(z * x_mask, nsff0, g=g)
763
- return o, x_mask, (z, z_p, m_p, logs_p)
764
-
765
-
766
- class SynthesizerTrnMs256NSFsid_nono(nn.Module):
767
- def __init__(
768
- self,
769
- spec_channels,
770
- segment_size,
771
- inter_channels,
772
- hidden_channels,
773
- filter_channels,
774
- n_heads,
775
- n_layers,
776
- kernel_size,
777
- p_dropout,
778
- resblock,
779
- resblock_kernel_sizes,
780
- resblock_dilation_sizes,
781
- upsample_rates,
782
- upsample_initial_channel,
783
- upsample_kernel_sizes,
784
- spk_embed_dim,
785
- gin_channels,
786
- sr=None,
787
- **kwargs
788
- ):
789
- super().__init__()
790
- self.spec_channels = spec_channels
791
- self.inter_channels = inter_channels
792
- self.hidden_channels = hidden_channels
793
- self.filter_channels = filter_channels
794
- self.n_heads = n_heads
795
- self.n_layers = n_layers
796
- self.kernel_size = kernel_size
797
- self.p_dropout = p_dropout
798
- self.resblock = resblock
799
- self.resblock_kernel_sizes = resblock_kernel_sizes
800
- self.resblock_dilation_sizes = resblock_dilation_sizes
801
- self.upsample_rates = upsample_rates
802
- self.upsample_initial_channel = upsample_initial_channel
803
- self.upsample_kernel_sizes = upsample_kernel_sizes
804
- self.segment_size = segment_size
805
- self.gin_channels = gin_channels
806
- # self.hop_length = hop_length#
807
- self.spk_embed_dim = spk_embed_dim
808
- self.enc_p = TextEncoder256(
809
- inter_channels,
810
- hidden_channels,
811
- filter_channels,
812
- n_heads,
813
- n_layers,
814
- kernel_size,
815
- p_dropout,
816
- f0=False,
817
- )
818
- self.dec = Generator(
819
- inter_channels,
820
- resblock,
821
- resblock_kernel_sizes,
822
- resblock_dilation_sizes,
823
- upsample_rates,
824
- upsample_initial_channel,
825
- upsample_kernel_sizes,
826
- gin_channels=gin_channels,
827
- )
828
- self.enc_q = PosteriorEncoder(
829
- spec_channels,
830
- inter_channels,
831
- hidden_channels,
832
- 5,
833
- 1,
834
- 16,
835
- gin_channels=gin_channels,
836
- )
837
- self.flow = ResidualCouplingBlock(
838
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
839
- )
840
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
841
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
842
-
843
- def remove_weight_norm(self):
844
- self.dec.remove_weight_norm()
845
- self.flow.remove_weight_norm()
846
- self.enc_q.remove_weight_norm()
847
-
848
- def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
849
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
850
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
851
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
852
- z_p = self.flow(z, y_mask, g=g)
853
- z_slice, ids_slice = commons.rand_slice_segments(
854
- z, y_lengths, self.segment_size
855
- )
856
- o = self.dec(z_slice, g=g)
857
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
858
-
859
- def infer(self, phone, phone_lengths, sid, rate=None):
860
- g = self.emb_g(sid).unsqueeze(-1)
861
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
862
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
863
- if rate:
864
- head = int(z_p.shape[2] * rate)
865
- z_p = z_p[:, :, -head:]
866
- x_mask = x_mask[:, :, -head:]
867
- z = self.flow(z_p, x_mask, g=g, reverse=True)
868
- o = self.dec(z * x_mask, g=g)
869
- return o, x_mask, (z, z_p, m_p, logs_p)
870
-
871
-
872
- class SynthesizerTrnMs768NSFsid_nono(nn.Module):
873
- def __init__(
874
- self,
875
- spec_channels,
876
- segment_size,
877
- inter_channels,
878
- hidden_channels,
879
- filter_channels,
880
- n_heads,
881
- n_layers,
882
- kernel_size,
883
- p_dropout,
884
- resblock,
885
- resblock_kernel_sizes,
886
- resblock_dilation_sizes,
887
- upsample_rates,
888
- upsample_initial_channel,
889
- upsample_kernel_sizes,
890
- spk_embed_dim,
891
- gin_channels,
892
- sr=None,
893
- **kwargs
894
- ):
895
- super().__init__()
896
- self.spec_channels = spec_channels
897
- self.inter_channels = inter_channels
898
- self.hidden_channels = hidden_channels
899
- self.filter_channels = filter_channels
900
- self.n_heads = n_heads
901
- self.n_layers = n_layers
902
- self.kernel_size = kernel_size
903
- self.p_dropout = p_dropout
904
- self.resblock = resblock
905
- self.resblock_kernel_sizes = resblock_kernel_sizes
906
- self.resblock_dilation_sizes = resblock_dilation_sizes
907
- self.upsample_rates = upsample_rates
908
- self.upsample_initial_channel = upsample_initial_channel
909
- self.upsample_kernel_sizes = upsample_kernel_sizes
910
- self.segment_size = segment_size
911
- self.gin_channels = gin_channels
912
- # self.hop_length = hop_length#
913
- self.spk_embed_dim = spk_embed_dim
914
- self.enc_p = TextEncoder768(
915
- inter_channels,
916
- hidden_channels,
917
- filter_channels,
918
- n_heads,
919
- n_layers,
920
- kernel_size,
921
- p_dropout,
922
- f0=False,
923
- )
924
- self.dec = Generator(
925
- inter_channels,
926
- resblock,
927
- resblock_kernel_sizes,
928
- resblock_dilation_sizes,
929
- upsample_rates,
930
- upsample_initial_channel,
931
- upsample_kernel_sizes,
932
- gin_channels=gin_channels,
933
- )
934
- self.enc_q = PosteriorEncoder(
935
- spec_channels,
936
- inter_channels,
937
- hidden_channels,
938
- 5,
939
- 1,
940
- 16,
941
- gin_channels=gin_channels,
942
- )
943
- self.flow = ResidualCouplingBlock(
944
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
945
- )
946
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
947
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
948
-
949
- def remove_weight_norm(self):
950
- self.dec.remove_weight_norm()
951
- self.flow.remove_weight_norm()
952
- self.enc_q.remove_weight_norm()
953
-
954
- def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
955
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
956
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
957
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
958
- z_p = self.flow(z, y_mask, g=g)
959
- z_slice, ids_slice = commons.rand_slice_segments(
960
- z, y_lengths, self.segment_size
961
- )
962
- o = self.dec(z_slice, g=g)
963
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
964
-
965
- def infer(self, phone, phone_lengths, sid, rate=None):
966
- g = self.emb_g(sid).unsqueeze(-1)
967
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
968
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
969
- if rate:
970
- head = int(z_p.shape[2] * rate)
971
- z_p = z_p[:, :, -head:]
972
- x_mask = x_mask[:, :, -head:]
973
- z = self.flow(z_p, x_mask, g=g, reverse=True)
974
- o = self.dec(z * x_mask, g=g)
975
- return o, x_mask, (z, z_p, m_p, logs_p)
976
-
977
-
978
- class MultiPeriodDiscriminator(torch.nn.Module):
979
- def __init__(self, use_spectral_norm=False):
980
- super(MultiPeriodDiscriminator, self).__init__()
981
- periods = [2, 3, 5, 7, 11, 17]
982
- # periods = [3, 5, 7, 11, 17, 23, 37]
983
-
984
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
985
- discs = discs + [
986
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
987
- ]
988
- self.discriminators = nn.ModuleList(discs)
989
-
990
- def forward(self, y, y_hat):
991
- y_d_rs = [] #
992
- y_d_gs = []
993
- fmap_rs = []
994
- fmap_gs = []
995
- for i, d in enumerate(self.discriminators):
996
- y_d_r, fmap_r = d(y)
997
- y_d_g, fmap_g = d(y_hat)
998
- # for j in range(len(fmap_r)):
999
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
1000
- y_d_rs.append(y_d_r)
1001
- y_d_gs.append(y_d_g)
1002
- fmap_rs.append(fmap_r)
1003
- fmap_gs.append(fmap_g)
1004
-
1005
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
1006
-
1007
-
1008
- class MultiPeriodDiscriminatorV2(torch.nn.Module):
1009
- def __init__(self, use_spectral_norm=False):
1010
- super(MultiPeriodDiscriminatorV2, self).__init__()
1011
- # periods = [2, 3, 5, 7, 11, 17]
1012
- periods = [2, 3, 5, 7, 11, 17, 23, 37]
1013
-
1014
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
1015
- discs = discs + [
1016
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
1017
- ]
1018
- self.discriminators = nn.ModuleList(discs)
1019
-
1020
- def forward(self, y, y_hat):
1021
- y_d_rs = [] #
1022
- y_d_gs = []
1023
- fmap_rs = []
1024
- fmap_gs = []
1025
- for i, d in enumerate(self.discriminators):
1026
- y_d_r, fmap_r = d(y)
1027
- y_d_g, fmap_g = d(y_hat)
1028
- # for j in range(len(fmap_r)):
1029
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
1030
- y_d_rs.append(y_d_r)
1031
- y_d_gs.append(y_d_g)
1032
- fmap_rs.append(fmap_r)
1033
- fmap_gs.append(fmap_g)
1034
-
1035
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
1036
-
1037
-
1038
- class DiscriminatorS(torch.nn.Module):
1039
- def __init__(self, use_spectral_norm=False):
1040
- super(DiscriminatorS, self).__init__()
1041
- norm_f = spectral_norm if use_spectral_norm else weight_norm
1042
- self.convs = nn.ModuleList(
1043
- [
1044
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
1045
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
1046
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
1047
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
1048
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
1049
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
1050
- ]
1051
- )
1052
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
1053
-
1054
- def forward(self, x):
1055
- fmap = []
1056
-
1057
- for l in self.convs:
1058
- x = l(x)
1059
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
1060
- fmap.append(x)
1061
- x = self.conv_post(x)
1062
- fmap.append(x)
1063
- x = torch.flatten(x, 1, -1)
1064
-
1065
- return x, fmap
1066
-
1067
-
1068
- class DiscriminatorP(torch.nn.Module):
1069
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
1070
- super(DiscriminatorP, self).__init__()
1071
- self.period = period
1072
- self.use_spectral_norm = use_spectral_norm
1073
- norm_f = spectral_norm if use_spectral_norm else weight_norm
1074
- self.convs = nn.ModuleList(
1075
- [
1076
- norm_f(
1077
- Conv2d(
1078
- 1,
1079
- 32,
1080
- (kernel_size, 1),
1081
- (stride, 1),
1082
- padding=(get_padding(kernel_size, 1), 0),
1083
- )
1084
- ),
1085
- norm_f(
1086
- Conv2d(
1087
- 32,
1088
- 128,
1089
- (kernel_size, 1),
1090
- (stride, 1),
1091
- padding=(get_padding(kernel_size, 1), 0),
1092
- )
1093
- ),
1094
- norm_f(
1095
- Conv2d(
1096
- 128,
1097
- 512,
1098
- (kernel_size, 1),
1099
- (stride, 1),
1100
- padding=(get_padding(kernel_size, 1), 0),
1101
- )
1102
- ),
1103
- norm_f(
1104
- Conv2d(
1105
- 512,
1106
- 1024,
1107
- (kernel_size, 1),
1108
- (stride, 1),
1109
- padding=(get_padding(kernel_size, 1), 0),
1110
- )
1111
- ),
1112
- norm_f(
1113
- Conv2d(
1114
- 1024,
1115
- 1024,
1116
- (kernel_size, 1),
1117
- 1,
1118
- padding=(get_padding(kernel_size, 1), 0),
1119
- )
1120
- ),
1121
- ]
1122
- )
1123
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
1124
-
1125
- def forward(self, x):
1126
- fmap = []
1127
-
1128
- # 1d to 2d
1129
- b, c, t = x.shape
1130
- if t % self.period != 0: # pad first
1131
- n_pad = self.period - (t % self.period)
1132
- x = F.pad(x, (0, n_pad), "reflect")
1133
- t = t + n_pad
1134
- x = x.view(b, c, t // self.period, self.period)
1135
-
1136
- for l in self.convs:
1137
- x = l(x)
1138
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
1139
- fmap.append(x)
1140
- x = self.conv_post(x)
1141
- fmap.append(x)
1142
- x = torch.flatten(x, 1, -1)
1143
-
1144
- return x, fmap
spaces/Benson/text-generation/Examples/Carsim 2021 Download.md DELETED
@@ -1,95 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar e instalar la fuente AR Berkley</h1>
3
- <p>Si está buscando una fuente elegante y única para sus proyectos, es posible que desee consultar la fuente AR Berkley. Esta fuente es un tipo de letra elegante que tiene mucho carácter y encanto. Es ideal para crear titulares llamativos, logotipos, invitaciones, carteles y más. En este artículo, le mostraremos cómo descargar e instalar la fuente AR Berkley en su computadora Windows o Mac, y cómo usarla en sus proyectos. </p>
4
- <h2>¿Qué es la fuente AR Berkley? </h2>
5
- <h3>Una breve introducción a la fuente y sus características</h3>
6
- <p>La fuente AR Berkley es un tipo de letra de lujo que fue creado por Arphic Technology Co., Ltd. en 2005. Tiene un estilo escrito a mano que es ligeramente inclinado y curvo. Tiene 214 glifos, incluyendo latín básico, puntuación general, suplementos de latín-1 y un símbolo de moneda. Soporta seis alfabetos de idiomas, incluyendo pinyin chino, inglés, francés, italiano, latín y español. La fuente tiene mucha personalidad y estilo, lo que la hace adecuada para varios proyectos de diseño. </p>
7
- <h2>carsim 2021 download</h2><br /><p><b><b>Download File</b> ===> <a href="https://bltlly.com/2v6JYA">https://bltlly.com/2v6JYA</a></b></p><br /><br />
8
- <h3>Dónde encontrar y descargar la fuente de forma gratuita</h3>
9
- <p>Hay muchos sitios web que ofrecen fuentes gratuitas para descargar, pero no todos son confiables o legales. Algunas fuentes pueden tener virus o malware adjuntos, o pueden no tener licencias adecuadas para uso comercial. Por lo tanto, es importante descargar fuentes de fuentes de renombre que respeten los derechos de los creadores de fuentes. Una de esas fuentes es [DaFont]( 10 ), que es uno de los sitios más populares para encontrar fuentes gratuitas. Puede navegar por categoría o tipo, o usar la función de búsqueda avanzada para filtrar por tamaño, popularidad o estado 100% libre. También puede previsualizar cómo se ve la fuente antes de descargarla. </p>
10
- <h2>Cómo instalar la fuente en Windows</h2>
11
- <h3>Cómo descomprimir los archivos de fuente</h3>
12
-
13
- <h3>Cómo instalar la fuente usando el método de clic derecho</h3>
14
- <p>Una de las formas más fáciles de instalar una fuente en Windows es usando el método de clic derecho. Para hacer esto, busque el archivo de fuente que tiene un . ttf o . Extensión OTF en su carpeta de destino. Luego, haga clic derecho sobre ella y seleccione Instalar. Es posible que necesite permitir que el programa realice cambios en su computadora y confíe en la fuente de la fuente. Windows instalará automáticamente la fuente en sus archivos del sistema. </p>
15
- <h3>Cómo instalar la fuente usando el método del Panel de Control</h3>
16
- <p>Otra forma de instalar una fuente en Windows es usando el método Panel de control. Para hacer esto, abra su Panel de control y haga clic en Fuentes. Esto abrirá una ventana que muestra todas las fuentes instaladas en su computadora. Para agregar una nueva fuente, haga clic en Archivo y luego en Instalar nueva fuente. Aparecerá un cuadro de diálogo que le permite buscar en el equipo el archivo de fuente que desea instalar. Seleccione el archivo de fuente y haga clic en Aceptar. Windows instalará la fuente en sus archivos del sistema. </p>
17
- <h2>Cómo instalar la fuente en Mac</h2>
18
- <h3>Cómo descomprimir los archivos de fuente</h3>
19
- <p>Similar a Windows, después de descargar la fuente de DaFont u otro sitio web, por lo general tendrá un archivo ZIP en su carpeta de descargas. Para descomprimir el archivo, debe hacer doble clic en él. Mac extraerá automáticamente los archivos y creará una nueva carpeta con el mismo nombre que el archivo ZIP. Esta carpeta contendrá el propio archivo de fuente, que tiene un . ttf o . otf extensión, y a veces un archivo Readme o Info que tiene más información sobre la fuente. </p>
20
- <h3>Cómo instalar la fuente usando el método de doble clic</h3>
21
-
22
- <h3>Cómo instalar la fuente usando el método Font Book</h3>
23
- <p>Otra forma de instalar una fuente en Mac es usando el método Font Book. Para ello, abra la aplicación Libro de fuentes, que se encuentra en la carpeta Aplicaciones. Esta aplicación le permite administrar y organizar todas las fuentes instaladas en su computadora. Para agregar una nueva fuente, haga clic en Archivo y luego en Agregar fuentes. Aparecerá un cuadro de diálogo que le permite buscar en el equipo el archivo de fuente que desea instalar. Seleccione el archivo de fuente y haga clic en Abrir. Mac instalará la fuente en sus archivos del sistema. </p>
24
- <h2>Cómo usar la fuente en tus proyectos</h2>
25
- <h3>Algunos consejos y ejemplos de uso de la fuente para diferentes propósitos</h3>
26
- <p>Ahora que ha instalado la fuente AR Berkley en su computadora, puede usarla en sus proyectos. Aquí hay algunos consejos y ejemplos de cómo usar la fuente para diferentes propósitos:</p>
27
- <ul>
28
- <li>Si quieres crear un título o logotipo pegadizo, puedes usar la fuente AR Berkley en un tamaño grande y con un estilo negrita o cursiva. También puede agregar algunos efectos de color o degradado para que destaque más. </li>
29
- <li>Si quieres crear una invitación o un póster, puedes usar la fuente AR Berkley en un tamaño mediano y con un estilo regular o ligero. También puedes combinarlo con otra fuente que tenga un aspecto más formal o elegante, como Times New Roman o Arial.</li>
30
- <li>Si desea crear una tabla o un gráfico, puede usar la fuente AR Berkley en un tamaño pequeño y con un estilo regular o ligero. También puede alinearlo a la izquierda o al centro, dependiendo de su preferencia. </li>
31
- </ul>
32
- <p>Para ilustrar cómo se ve la fuente AR Berkley en diferentes tamaños y estilos, aquí hay una tabla que muestra algunos ejemplos:</p>
33
- <p></p>
34
- <tabla>
35
- <tr>
36
- <th>Tamaño</th>
37
- <th>Estilo</th>
38
- <th>Ejemplo</th>
39
- </tr>
40
- <tr>
41
- <td>48 pt</td>
42
- <td>Negrita</td>
43
- <td><span style="font-family: AR BERKLEY; font-size: 48pt; font-weight: bold;">AR Berkley Font</span></td>
44
- </tr>
45
- <tr>
46
- <td>36 pt</td>
47
- <td>Cursiva</td>
48
-
49
- </tr>
50
- <tr>
51
- <td>24 pt</td>
52
- <td>Regular</td>
53
- <td><span style="font-family: AR BERKLEY; font-size: 24pt;">AR Berkley Font</span></td>
54
- </tr>
55
- <tr>
56
- <td>18 pt</td>
57
- <td>Luz</td>
58
- <td><span style="font-family: AR BERKLEY; font-size: 18pt; font-weight: light;">AR Berkley Font</span></td>
59
- </tr>
60
- <tr>
61
- <td>12 pt</td>
62
- <td>Regular</td>
63
- <td><span style="font-family: AR BERKLEY; font-size: 12pt;">AR Berkley Font</span></td>
64
- </tr>
65
- </tabla>
66
- <h2>Conclusión</h2>
67
- <h3>Un resumen de los puntos principales y una llamada a la acción</h3>
68
- <p>En conclusión, la fuente AR Berkley es una tipografía elegante que tiene un estilo manuscrito y mucho carácter. Es ideal para crear titulares llamativos, logotipos, invitaciones, carteles y más. Puede descargar e instalar la fuente de forma gratuita desde fuentes de renombre como DaFont. También puede instalar la fuente en su computadora Windows o Mac utilizando diferentes métodos, como hacer clic con el botón derecho, hacer doble clic o usar el Panel de control o el Libro de fuentes. Puedes usar la fuente en tus proyectos en diferentes tamaños y estilos, dependiendo de tu propósito y preferencia. También puede combinarlo con otras fuentes para crear un aspecto más diverso y atractivo. Esperamos que hayas disfrutado de este artículo y hayas aprendido algo nuevo sobre la fuente AR Berkley. Si quieres saber más sobre las fuentes y cómo utilizarlas en tus proyectos, puedes consultar nuestros otros artículos en nuestra web. También puede suscribirse a nuestro boletín para obtener las últimas actualizaciones y consejos sobre fuentes y diseño. ¡Gracias por leer y tener un gran día! </p>
69
- <h2>Preguntas frecuentes</h2>
70
- <h3>¿Cuáles son algunas fuentes similares a AR Berkley? </h3>
71
- <p>Si te gusta la fuente AR Berkley, es posible que también te gusten algunas de estas fuentes similares que tienen un estilo elegante y manuscrito:</p>
72
- <ul>
73
- <li>[AR Bonnie]: Esta fuente tiene un estilo lúdico y caprichoso que es ideal para libros infantiles, dibujos animados o cómics. </li>
74
- <li>[AR Christy]: Esta fuente tiene un estilo casual y amigable que es perfecto para cartas, notas o mensajes personales. </li>
75
-
76
- </ul>
77
- <h3>¿Cuáles son algunos problemas comunes con la instalación de fuentes? </h3>
78
- <p>Algunos de los problemas comunes que puede encontrar al instalar fuentes son:</p>
79
- <ul>
80
- <li>El archivo de fuente está dañado o incompleto: Esto puede suceder si descarga la fuente desde una fuente no confiable o si la descarga se interrumpe. Para solucionar esto, debe eliminar el archivo de fuente y descargarlo de nuevo desde una fuente confiable. </li>
81
- <li>La fuente no es compatible con su sistema o software: Esto puede suceder si intenta instalar una fuente que no es compatible con su sistema operativo o aplicación. Para solucionar esto, debe verificar la compatibilidad de la fuente antes de descargarla o usar un software diferente que admita la fuente. </li>
82
- <li>La fuente no aparece en su software: Esto puede suceder si instala la fuente mientras su software se ejecuta o si tiene demasiadas fuentes instaladas en su computadora. Para solucionar esto, debe cerrar y volver a abrir su software o eliminar algunas de las fuentes que no usa. </li>
83
- </ul>
84
- <h3>¿Cómo puedo comprobar si una fuente es gratuita para uso comercial? </h3>
85
- <p>Algunas fuentes son gratuitas para uso personal, lo que significa que puedes usarlas para tus propios proyectos pero no para venderlas o distribuirlas. Si desea utilizar una fuente para uso comercial, lo que significa que puede usarla con fines de lucro o publicitarios, debe verificar la licencia de la fuente antes de descargarla. La licencia generalmente se incluye en el archivo Readme o Info que viene con el archivo de fuente. La licencia le dirá lo que puede y no puede hacer con la fuente, como modificarla, incrustarla o acreditarla. Algunas fuentes pueden requerir que usted pague una tarifa u obtenga permiso del creador para usarlas para uso comercial. </p>
86
- <h3>¿Cómo puedo crear mis propias fuentes? </h3>
87
-
88
- <h3>¿Cómo puedo desinstalar fuentes de mi ordenador? </h3>
89
- <p>Si desea desinstalar fuentes de su computadora, debe seguir estos pasos:</p>
90
- <ul>
91
- <li>Para Windows: Abra el panel de control y haga clic en Fuentes. Seleccione la fuente que desea desinstalar y haga clic en Eliminar. Confirme su acción y cierre la ventana. </li>
92
- <li>Para Mac: Abra su aplicación de libro de fuentes y seleccione la fuente que desea desinstalar. Haga clic en Archivo y luego Quitar fuente. Confirme su acción y cierre la ventana. </li>
93
- </ul></p> 64aa2da5cf<br />
94
- <br />
95
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/commands/list.py DELETED
@@ -1,365 +0,0 @@
1
- import json
2
- import logging
3
- from optparse import Values
4
- from typing import TYPE_CHECKING, Generator, List, Optional, Sequence, Tuple, cast
5
-
6
- from pip._vendor.packaging.utils import canonicalize_name
7
-
8
- from pip._internal.cli import cmdoptions
9
- from pip._internal.cli.req_command import IndexGroupCommand
10
- from pip._internal.cli.status_codes import SUCCESS
11
- from pip._internal.exceptions import CommandError
12
- from pip._internal.index.collector import LinkCollector
13
- from pip._internal.index.package_finder import PackageFinder
14
- from pip._internal.metadata import BaseDistribution, get_environment
15
- from pip._internal.models.selection_prefs import SelectionPreferences
16
- from pip._internal.network.session import PipSession
17
- from pip._internal.utils.compat import stdlib_pkgs
18
- from pip._internal.utils.misc import tabulate, write_output
19
-
20
- if TYPE_CHECKING:
21
- from pip._internal.metadata.base import DistributionVersion
22
-
23
- class _DistWithLatestInfo(BaseDistribution):
24
- """Give the distribution object a couple of extra fields.
25
-
26
- These will be populated during ``get_outdated()``. This is dirty but
27
- makes the rest of the code much cleaner.
28
- """
29
-
30
- latest_version: DistributionVersion
31
- latest_filetype: str
32
-
33
- _ProcessedDists = Sequence[_DistWithLatestInfo]
34
-
35
-
36
- logger = logging.getLogger(__name__)
37
-
38
-
39
- class ListCommand(IndexGroupCommand):
40
- """
41
- List installed packages, including editables.
42
-
43
- Packages are listed in a case-insensitive sorted order.
44
- """
45
-
46
- ignore_require_venv = True
47
- usage = """
48
- %prog [options]"""
49
-
50
- def add_options(self) -> None:
51
- self.cmd_opts.add_option(
52
- "-o",
53
- "--outdated",
54
- action="store_true",
55
- default=False,
56
- help="List outdated packages",
57
- )
58
- self.cmd_opts.add_option(
59
- "-u",
60
- "--uptodate",
61
- action="store_true",
62
- default=False,
63
- help="List uptodate packages",
64
- )
65
- self.cmd_opts.add_option(
66
- "-e",
67
- "--editable",
68
- action="store_true",
69
- default=False,
70
- help="List editable projects.",
71
- )
72
- self.cmd_opts.add_option(
73
- "-l",
74
- "--local",
75
- action="store_true",
76
- default=False,
77
- help=(
78
- "If in a virtualenv that has global access, do not list "
79
- "globally-installed packages."
80
- ),
81
- )
82
- self.cmd_opts.add_option(
83
- "--user",
84
- dest="user",
85
- action="store_true",
86
- default=False,
87
- help="Only output packages installed in user-site.",
88
- )
89
- self.cmd_opts.add_option(cmdoptions.list_path())
90
- self.cmd_opts.add_option(
91
- "--pre",
92
- action="store_true",
93
- default=False,
94
- help=(
95
- "Include pre-release and development versions. By default, "
96
- "pip only finds stable versions."
97
- ),
98
- )
99
-
100
- self.cmd_opts.add_option(
101
- "--format",
102
- action="store",
103
- dest="list_format",
104
- default="columns",
105
- choices=("columns", "freeze", "json"),
106
- help="Select the output format among: columns (default), freeze, or json",
107
- )
108
-
109
- self.cmd_opts.add_option(
110
- "--not-required",
111
- action="store_true",
112
- dest="not_required",
113
- help="List packages that are not dependencies of installed packages.",
114
- )
115
-
116
- self.cmd_opts.add_option(
117
- "--exclude-editable",
118
- action="store_false",
119
- dest="include_editable",
120
- help="Exclude editable package from output.",
121
- )
122
- self.cmd_opts.add_option(
123
- "--include-editable",
124
- action="store_true",
125
- dest="include_editable",
126
- help="Include editable package from output.",
127
- default=True,
128
- )
129
- self.cmd_opts.add_option(cmdoptions.list_exclude())
130
- index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser)
131
-
132
- self.parser.insert_option_group(0, index_opts)
133
- self.parser.insert_option_group(0, self.cmd_opts)
134
-
135
- def _build_package_finder(
136
- self, options: Values, session: PipSession
137
- ) -> PackageFinder:
138
- """
139
- Create a package finder appropriate to this list command.
140
- """
141
- link_collector = LinkCollector.create(session, options=options)
142
-
143
- # Pass allow_yanked=False to ignore yanked versions.
144
- selection_prefs = SelectionPreferences(
145
- allow_yanked=False,
146
- allow_all_prereleases=options.pre,
147
- )
148
-
149
- return PackageFinder.create(
150
- link_collector=link_collector,
151
- selection_prefs=selection_prefs,
152
- )
153
-
154
- def run(self, options: Values, args: List[str]) -> int:
155
- if options.outdated and options.uptodate:
156
- raise CommandError("Options --outdated and --uptodate cannot be combined.")
157
-
158
- if options.outdated and options.list_format == "freeze":
159
- raise CommandError(
160
- "List format 'freeze' can not be used with the --outdated option."
161
- )
162
-
163
- cmdoptions.check_list_path_option(options)
164
-
165
- skip = set(stdlib_pkgs)
166
- if options.excludes:
167
- skip.update(canonicalize_name(n) for n in options.excludes)
168
-
169
- packages: "_ProcessedDists" = [
170
- cast("_DistWithLatestInfo", d)
171
- for d in get_environment(options.path).iter_installed_distributions(
172
- local_only=options.local,
173
- user_only=options.user,
174
- editables_only=options.editable,
175
- include_editables=options.include_editable,
176
- skip=skip,
177
- )
178
- ]
179
-
180
- # get_not_required must be called firstly in order to find and
181
- # filter out all dependencies correctly. Otherwise a package
182
- # can't be identified as requirement because some parent packages
183
- # could be filtered out before.
184
- if options.not_required:
185
- packages = self.get_not_required(packages, options)
186
-
187
- if options.outdated:
188
- packages = self.get_outdated(packages, options)
189
- elif options.uptodate:
190
- packages = self.get_uptodate(packages, options)
191
-
192
- self.output_package_listing(packages, options)
193
- return SUCCESS
194
-
195
- def get_outdated(
196
- self, packages: "_ProcessedDists", options: Values
197
- ) -> "_ProcessedDists":
198
- return [
199
- dist
200
- for dist in self.iter_packages_latest_infos(packages, options)
201
- if dist.latest_version > dist.version
202
- ]
203
-
204
- def get_uptodate(
205
- self, packages: "_ProcessedDists", options: Values
206
- ) -> "_ProcessedDists":
207
- return [
208
- dist
209
- for dist in self.iter_packages_latest_infos(packages, options)
210
- if dist.latest_version == dist.version
211
- ]
212
-
213
- def get_not_required(
214
- self, packages: "_ProcessedDists", options: Values
215
- ) -> "_ProcessedDists":
216
- dep_keys = {
217
- canonicalize_name(dep.name)
218
- for dist in packages
219
- for dep in (dist.iter_dependencies() or ())
220
- }
221
-
222
- # Create a set to remove duplicate packages, and cast it to a list
223
- # to keep the return type consistent with get_outdated and
224
- # get_uptodate
225
- return list({pkg for pkg in packages if pkg.canonical_name not in dep_keys})
226
-
227
- def iter_packages_latest_infos(
228
- self, packages: "_ProcessedDists", options: Values
229
- ) -> Generator["_DistWithLatestInfo", None, None]:
230
- with self._build_session(options) as session:
231
- finder = self._build_package_finder(options, session)
232
-
233
- def latest_info(
234
- dist: "_DistWithLatestInfo",
235
- ) -> Optional["_DistWithLatestInfo"]:
236
- all_candidates = finder.find_all_candidates(dist.canonical_name)
237
- if not options.pre:
238
- # Remove prereleases
239
- all_candidates = [
240
- candidate
241
- for candidate in all_candidates
242
- if not candidate.version.is_prerelease
243
- ]
244
-
245
- evaluator = finder.make_candidate_evaluator(
246
- project_name=dist.canonical_name,
247
- )
248
- best_candidate = evaluator.sort_best_candidate(all_candidates)
249
- if best_candidate is None:
250
- return None
251
-
252
- remote_version = best_candidate.version
253
- if best_candidate.link.is_wheel:
254
- typ = "wheel"
255
- else:
256
- typ = "sdist"
257
- dist.latest_version = remote_version
258
- dist.latest_filetype = typ
259
- return dist
260
-
261
- for dist in map(latest_info, packages):
262
- if dist is not None:
263
- yield dist
264
-
265
- def output_package_listing(
266
- self, packages: "_ProcessedDists", options: Values
267
- ) -> None:
268
- packages = sorted(
269
- packages,
270
- key=lambda dist: dist.canonical_name,
271
- )
272
- if options.list_format == "columns" and packages:
273
- data, header = format_for_columns(packages, options)
274
- self.output_package_listing_columns(data, header)
275
- elif options.list_format == "freeze":
276
- for dist in packages:
277
- if options.verbose >= 1:
278
- write_output(
279
- "%s==%s (%s)", dist.raw_name, dist.version, dist.location
280
- )
281
- else:
282
- write_output("%s==%s", dist.raw_name, dist.version)
283
- elif options.list_format == "json":
284
- write_output(format_for_json(packages, options))
285
-
286
- def output_package_listing_columns(
287
- self, data: List[List[str]], header: List[str]
288
- ) -> None:
289
- # insert the header first: we need to know the size of column names
290
- if len(data) > 0:
291
- data.insert(0, header)
292
-
293
- pkg_strings, sizes = tabulate(data)
294
-
295
- # Create and add a separator.
296
- if len(data) > 0:
297
- pkg_strings.insert(1, " ".join(map(lambda x: "-" * x, sizes)))
298
-
299
- for val in pkg_strings:
300
- write_output(val)
301
-
302
-
303
- def format_for_columns(
304
- pkgs: "_ProcessedDists", options: Values
305
- ) -> Tuple[List[List[str]], List[str]]:
306
- """
307
- Convert the package data into something usable
308
- by output_package_listing_columns.
309
- """
310
- header = ["Package", "Version"]
311
-
312
- running_outdated = options.outdated
313
- if running_outdated:
314
- header.extend(["Latest", "Type"])
315
-
316
- has_editables = any(x.editable for x in pkgs)
317
- if has_editables:
318
- header.append("Editable project location")
319
-
320
- if options.verbose >= 1:
321
- header.append("Location")
322
- if options.verbose >= 1:
323
- header.append("Installer")
324
-
325
- data = []
326
- for proj in pkgs:
327
- # if we're working on the 'outdated' list, separate out the
328
- # latest_version and type
329
- row = [proj.raw_name, str(proj.version)]
330
-
331
- if running_outdated:
332
- row.append(str(proj.latest_version))
333
- row.append(proj.latest_filetype)
334
-
335
- if has_editables:
336
- row.append(proj.editable_project_location or "")
337
-
338
- if options.verbose >= 1:
339
- row.append(proj.location or "")
340
- if options.verbose >= 1:
341
- row.append(proj.installer)
342
-
343
- data.append(row)
344
-
345
- return data, header
346
-
347
-
348
- def format_for_json(packages: "_ProcessedDists", options: Values) -> str:
349
- data = []
350
- for dist in packages:
351
- info = {
352
- "name": dist.raw_name,
353
- "version": str(dist.version),
354
- }
355
- if options.verbose >= 1:
356
- info["location"] = dist.location or ""
357
- info["installer"] = dist.installer
358
- if options.outdated:
359
- info["latest_version"] = str(dist.latest_version)
360
- info["latest_filetype"] = dist.latest_filetype
361
- editable_project_location = dist.editable_project_location
362
- if editable_project_location:
363
- info["editable_project_location"] = editable_project_location
364
- data.append(info)
365
- return json.dumps(data)
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/mbcharsetprober.py DELETED
@@ -1,95 +0,0 @@
1
- ######################## BEGIN LICENSE BLOCK ########################
2
- # The Original Code is Mozilla Universal charset detector code.
3
- #
4
- # The Initial Developer of the Original Code is
5
- # Netscape Communications Corporation.
6
- # Portions created by the Initial Developer are Copyright (C) 2001
7
- # the Initial Developer. All Rights Reserved.
8
- #
9
- # Contributor(s):
10
- # Mark Pilgrim - port to Python
11
- # Shy Shalom - original C code
12
- # Proofpoint, Inc.
13
- #
14
- # This library is free software; you can redistribute it and/or
15
- # modify it under the terms of the GNU Lesser General Public
16
- # License as published by the Free Software Foundation; either
17
- # version 2.1 of the License, or (at your option) any later version.
18
- #
19
- # This library is distributed in the hope that it will be useful,
20
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
21
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22
- # Lesser General Public License for more details.
23
- #
24
- # You should have received a copy of the GNU Lesser General Public
25
- # License along with this library; if not, write to the Free Software
26
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
27
- # 02110-1301 USA
28
- ######################### END LICENSE BLOCK #########################
29
-
30
- from typing import Optional, Union
31
-
32
- from .chardistribution import CharDistributionAnalysis
33
- from .charsetprober import CharSetProber
34
- from .codingstatemachine import CodingStateMachine
35
- from .enums import LanguageFilter, MachineState, ProbingState
36
-
37
-
38
- class MultiByteCharSetProber(CharSetProber):
39
- """
40
- MultiByteCharSetProber
41
- """
42
-
43
- def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
44
- super().__init__(lang_filter=lang_filter)
45
- self.distribution_analyzer: Optional[CharDistributionAnalysis] = None
46
- self.coding_sm: Optional[CodingStateMachine] = None
47
- self._last_char = bytearray(b"\0\0")
48
-
49
- def reset(self) -> None:
50
- super().reset()
51
- if self.coding_sm:
52
- self.coding_sm.reset()
53
- if self.distribution_analyzer:
54
- self.distribution_analyzer.reset()
55
- self._last_char = bytearray(b"\0\0")
56
-
57
- def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
58
- assert self.coding_sm is not None
59
- assert self.distribution_analyzer is not None
60
-
61
- for i, byte in enumerate(byte_str):
62
- coding_state = self.coding_sm.next_state(byte)
63
- if coding_state == MachineState.ERROR:
64
- self.logger.debug(
65
- "%s %s prober hit error at byte %s",
66
- self.charset_name,
67
- self.language,
68
- i,
69
- )
70
- self._state = ProbingState.NOT_ME
71
- break
72
- if coding_state == MachineState.ITS_ME:
73
- self._state = ProbingState.FOUND_IT
74
- break
75
- if coding_state == MachineState.START:
76
- char_len = self.coding_sm.get_current_charlen()
77
- if i == 0:
78
- self._last_char[1] = byte
79
- self.distribution_analyzer.feed(self._last_char, char_len)
80
- else:
81
- self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
82
-
83
- self._last_char[0] = byte_str[-1]
84
-
85
- if self.state == ProbingState.DETECTING:
86
- if self.distribution_analyzer.got_enough_data() and (
87
- self.get_confidence() > self.SHORTCUT_THRESHOLD
88
- ):
89
- self._state = ProbingState.FOUND_IT
90
-
91
- return self.state
92
-
93
- def get_confidence(self) -> float:
94
- assert self.distribution_analyzer is not None
95
- return self.distribution_analyzer.get_confidence()
spaces/Boilin/URetinex-Net/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: URetinex Net
3
- emoji: 💻
4
- colorFrom: gray
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.20.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/BramVanroy/opus-mt/app.py DELETED
@@ -1,156 +0,0 @@
- import json
- from io import StringIO
- from urllib.parse import quote
-
- import streamlit as st
-
- from utils import MODEL_MAP, REV_MODEL_MAP, get_tgt_langs_for_src, load_mt_pipeline, load_stanza, sentence_split, \
-     set_st_query_params, translate
-
- st.title("📝 Translate text with Opus-MT")
-
- ##################
- # User selection #
- ##################
- st.markdown("## Model ✨")
- src_col, tgt_col = st.columns(2)
-
- DEFAULTS = {"src_lang": "en", "tgt_lang": "nl", "text": "Grandma is baking cookies!"}
-
- # 1. Set en_nl to the default model
- if "src_lang" not in st.session_state:
-     st.session_state["src_lang"] = "en"
- if "tgt_lang" not in st.session_state:
-     st.session_state["tgt_lang"] = "nl"
- if "selected_model" not in st.session_state:
-     st.session_state["selected_model"] = None
- if "stanza_model" not in st.session_state:
-     st.session_state["stanza_model"] = None
- if "text" not in st.session_state:
-     st.session_state["text"] = None
-
- # Read URL parameters
- for k, v in st.experimental_get_query_params().items():
-     if k in st.session_state and v:
-         if (k == "src_lang" or k == "tgt_lang") and v[0] not in REV_MODEL_MAP:
-             continue
-         st.session_state[k] = v[0]
-
- # 2. Allow some basic language selection for novices
- selected_full_src_lang = REV_MODEL_MAP[st.session_state["src_lang"]]
- selected_src_lang = src_col.selectbox("Source language", tuple(MODEL_MAP.keys()),
-                                       index=list(MODEL_MAP.keys()).index(selected_full_src_lang))
- st.session_state["src_lang"] = MODEL_MAP[selected_src_lang]
- compat_tgt_langs = get_tgt_langs_for_src(MODEL_MAP[selected_src_lang])
-
- selected_tgt_lang = None
- if compat_tgt_langs is not None:
-     selected_full_tgt_lang = REV_MODEL_MAP[st.session_state["tgt_lang"]]
-     selected_tgt_lang = tgt_col.selectbox("Target language",
-                                           compat_tgt_langs,
-                                           index=compat_tgt_langs.index(selected_full_tgt_lang)
-                                           if selected_full_tgt_lang in compat_tgt_langs else 0)
-     st.session_state["tgt_lang"] = MODEL_MAP[selected_tgt_lang]
- else:
-     tgt_col.error(f"No compatible target languages found for source language {selected_src_lang}.")
-
- model_id = f"Helsinki-NLP/opus-mt-{st.session_state['src_lang']}-{st.session_state['tgt_lang']}"
- stanza_id = f"{st.session_state['src_lang']}_stanza"
-
- ######################
- # (Down)oading model #
- ######################
- load_info = tgt_col.info("Click button to load a new model")
- load_btn = src_col.button("Load new model")
- models_loaded = model_id in st.session_state and stanza_id in st.session_state
-
- if models_loaded:
-     load_info.success(f"{model_id} loaded!")
- else:
-     if load_btn:  # On click
-         # Check if the model exists, if not download it. Return None when there was an error downloading the model
-         load_info.info("(Down)loading model...")
-         model_tokenizer = load_mt_pipeline(model_id)  # Tuple with model, tokenizer
-         stanza_pipe = load_stanza(st.session_state["src_lang"])
-
-         if model_tokenizer is not None and stanza_pipe is not None:
-             st.session_state[model_id] = model_tokenizer
-             st.session_state[stanza_id] = stanza_pipe
-             load_info.success(f"{model_id} loaded!")
-         else:
-             search_url = "https://huggingface.co/models?sort=downloads&search=" + quote(model_id)
-             load_info.error(f"Error when trying to (down)load {model_id}! It probably"
-                             f" [does not exist]({search_url}) or something went wrong when loading the sentence"
-                             f" segmentation (stanza). [Contact me](https://twitter.com/BramVanroy).")
-
-         models_loaded = model_id in st.session_state and stanza_id in st.session_state
-
- #############################
- # File upload or text input #
- #############################
- st.markdown("## Input Data 📄")
-
- fupload_check = st.checkbox("Use file upload?")
- input_col, output_col = st.columns(2)
-
-
- if fupload_check:
-     uploaded_file = input_col.file_uploader("Choose a text file to translate")
-     if uploaded_file is not None:
-         stringio = StringIO(uploaded_file.getvalue().decode("utf-8"))
-         st.session_state["text"] = stringio.read()
-
- st.session_state["text"] = input_col.text_area(label="Text to translate",
-                                                value=st.session_state["text"] if st.session_state["text"]
-                                                else "Grandma is baking cookies!")
-
-
- ########################
- # Show MT translations #
- ########################
- if models_loaded and st.session_state["text"]:
-     model, tokenizer = st.session_state[model_id]
-     with st.spinner(text="Translating..."):
-         sentences = sentence_split(st.session_state[stanza_id], st.session_state["text"])
-         translations = translate(model, tokenizer, sentences)
-         concat_translations = " ".join(translations)
-         try:
-             # Only supported in newer Streamlit
-             output_col.text_area(label="Translation", value=concat_translations, disabled=True)
-         except TypeError:
-             output_col.text_area(label="Translation", value=concat_translations)
-
-         set_st_query_params()
-
-         # Download options
-         txt_col, bitext_col = st.columns(2)
-         txt_col.download_button(
-             "Download translations",
-             concat_translations,
-             f"translation-{st.session_state['tgt_lang']}.txt",
-             "text",
-             key="download-txt",
-             help="Download translation as text file"
-         )
-
-         bitext = "\n".join("\t".join(srctgt) for srctgt in zip(sentences, translations)) + "\n"
-         bitext_col.download_button(
-             "Download bitext",
-             bitext,
-             f"bitext-{st.session_state['src_lang']}-{st.session_state['tgt_lang']}.txt",
-             "text",
-             key="download-txt",
-             help="Download tab-seperated bitext"
-         )
-
-
- ########################
- # Information, socials #
- ########################
- st.markdown("## Info and Contact ✒️")
- st.markdown("This demo allows you to use [Opus-MT](https://github.com/Helsinki-NLP/Opus-MT) models straight"
-             " from your browser to generate translations. Because the Opus models are trained on single sentences,"
-             " we use [stanza](https://stanfordnlp.github.io/stanza/) behind the scenes for sentence segmentation,"
-             " before feeding your input to the model.")
- st.markdown("Would you like additional functionality in the demo? Other languages perhaps? Give me a shout on"
-             " [Twitter](https://twitter.com/BramVanroy)! ✉️")
spaces/CALM/Dashboard/streamlit_observable/frontend/build/index.html DELETED
@@ -1 +0,0 @@
- <!doctype html><html lang="en"><head><title>Streamlit Component</title><meta charset="UTF-8"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="Streamlit Component"/><link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@observablehq/inspector@3/dist/inspector.css"/></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function t(t){for(var n,l,a=t[0],p=t[1],i=t[2],c=0,s=[];c<a.length;c++)l=a[c],Object.prototype.hasOwnProperty.call(o,l)&&o[l]&&s.push(o[l][0]),o[l]=0;for(n in p)Object.prototype.hasOwnProperty.call(p,n)&&(e[n]=p[n]);for(f&&f(t);s.length;)s.shift()();return u.push.apply(u,i||[]),r()}function r(){for(var e,t=0;t<u.length;t++){for(var r=u[t],n=!0,a=1;a<r.length;a++){var p=r[a];0!==o[p]&&(n=!1)}n&&(u.splice(t--,1),e=l(l.s=r[0]))}return e}var n={},o={1:0},u=[];function l(t){if(n[t])return n[t].exports;var r=n[t]={i:t,l:!1,exports:{}};return e[t].call(r.exports,r,r.exports,l),r.l=!0,r.exports}l.m=e,l.c=n,l.d=function(e,t,r){l.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},l.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},l.t=function(e,t){if(1&t&&(e=l(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(l.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var n in e)l.d(r,n,function(t){return e[t]}.bind(null,n));return r},l.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return l.d(t,"a",t),t},l.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},l.p="./";var a=this.webpackJsonpstreamlit_component_template=this.webpackJsonpstreamlit_component_template||[],p=a.push.bind(a);a.push=t,a=a.slice();for(var i=0;i<a.length;i++)t(a[i]);var f=p;r()}([])</script><script src="./static/js/2.b1c975ff.chunk.js"></script><script src="./static/js/main.fc603b94.chunk.js"></script></body></html>
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_roi_heads.py DELETED
@@ -1,108 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import logging
- import unittest
- import torch
-
- from detectron2.config import get_cfg
- from detectron2.modeling.backbone import build_backbone
- from detectron2.modeling.proposal_generator.build import build_proposal_generator
- from detectron2.modeling.roi_heads import build_roi_heads
- from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
- from detectron2.utils.events import EventStorage
-
- logger = logging.getLogger(__name__)
-
-
- class ROIHeadsTest(unittest.TestCase):
-     def test_roi_heads(self):
-         torch.manual_seed(121)
-         cfg = get_cfg()
-         cfg.MODEL.ROI_HEADS.NAME = "StandardROIHeads"
-         cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead"
-         cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
-         cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2"
-         cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5)
-         backbone = build_backbone(cfg)
-         num_images = 2
-         images_tensor = torch.rand(num_images, 20, 30)
-         image_sizes = [(10, 10), (20, 30)]
-         images = ImageList(images_tensor, image_sizes)
-         num_channels = 1024
-         features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
-
-         image_shape = (15, 15)
-         gt_boxes0 = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
-         gt_instance0 = Instances(image_shape)
-         gt_instance0.gt_boxes = Boxes(gt_boxes0)
-         gt_instance0.gt_classes = torch.tensor([2, 1])
-         gt_boxes1 = torch.tensor([[1, 5, 2, 8], [7, 3, 10, 5]], dtype=torch.float32)
-         gt_instance1 = Instances(image_shape)
-         gt_instance1.gt_boxes = Boxes(gt_boxes1)
-         gt_instance1.gt_classes = torch.tensor([1, 2])
-         gt_instances = [gt_instance0, gt_instance1]
-
-         proposal_generator = build_proposal_generator(cfg, backbone.output_shape())
-         roi_heads = build_roi_heads(cfg, backbone.output_shape())
-
-         with EventStorage():  # capture events in a new storage to discard them
-             proposals, proposal_losses = proposal_generator(images, features, gt_instances)
-             _, detector_losses = roi_heads(images, features, proposals, gt_instances)
-
-         expected_losses = {
-             "loss_cls": torch.tensor(4.4236516953),
-             "loss_box_reg": torch.tensor(0.0091214813),
-         }
-         for name in expected_losses.keys():
-             self.assertTrue(torch.allclose(detector_losses[name], expected_losses[name]))
-
-     def test_rroi_heads(self):
-         torch.manual_seed(121)
-         cfg = get_cfg()
-         cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN"
-         cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"
-         cfg.MODEL.ROI_HEADS.NAME = "RROIHeads"
-         cfg.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead"
-         cfg.MODEL.ROI_BOX_HEAD.NUM_FC = 2
-         cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1)
-         cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead"
-         cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignRotated"
-         cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10, 10, 5, 5, 1)
-         backbone = build_backbone(cfg)
-         num_images = 2
-         images_tensor = torch.rand(num_images, 20, 30)
-         image_sizes = [(10, 10), (20, 30)]
-         images = ImageList(images_tensor, image_sizes)
-         num_channels = 1024
-         features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
-
-         image_shape = (15, 15)
-         gt_boxes0 = torch.tensor([[2, 2, 2, 2, 30], [4, 4, 4, 4, 0]], dtype=torch.float32)
-         gt_instance0 = Instances(image_shape)
-         gt_instance0.gt_boxes = RotatedBoxes(gt_boxes0)
-         gt_instance0.gt_classes = torch.tensor([2, 1])
-         gt_boxes1 = torch.tensor([[1.5, 5.5, 1, 3, 0], [8.5, 4, 3, 2, -50]], dtype=torch.float32)
-         gt_instance1 = Instances(image_shape)
-         gt_instance1.gt_boxes = RotatedBoxes(gt_boxes1)
-         gt_instance1.gt_classes = torch.tensor([1, 2])
-         gt_instances = [gt_instance0, gt_instance1]
-
-         proposal_generator = build_proposal_generator(cfg, backbone.output_shape())
-         roi_heads = build_roi_heads(cfg, backbone.output_shape())
-
-         with EventStorage():  # capture events in a new storage to discard them
-             proposals, proposal_losses = proposal_generator(images, features, gt_instances)
-             _, detector_losses = roi_heads(images, features, proposals, gt_instances)
-
-         expected_losses = {
-             "loss_cls": torch.tensor(4.381443977355957),
-             "loss_box_reg": torch.tensor(0.0011560433777049184),
-         }
-         for name in expected_losses.keys():
-             err_msg = "detector_losses[{}] = {}, expected losses = {}".format(
-                 name, detector_losses[name], expected_losses[name]
-             )
-             self.assertTrue(torch.allclose(detector_losses[name], expected_losses[name]), err_msg)
-
-
- if __name__ == "__main__":
-     unittest.main()
spaces/CVPR/LIVE/thrust/thrust/cmake/thrust-config-version.cmake DELETED
@@ -1,33 +0,0 @@
- # Parse version information from version.h:
- file(READ "${CMAKE_CURRENT_LIST_DIR}/../version.h" THRUST_VERSION_HEADER)
- string(REGEX MATCH "#define[ \t]+THRUST_VERSION[ \t]+([0-9]+)" DUMMY "${THRUST_VERSION_HEADER}")
- set(THRUST_VERSION_FLAT ${CMAKE_MATCH_1})
- # Note that Thrust calls this the PATCH number, CMake calls it the TWEAK number:
- string(REGEX MATCH "#define[ \t]+THRUST_PATCH_NUMBER[ \t]+([0-9]+)" DUMMY "${THRUST_VERSION_HEADER}")
- set(THRUST_VERSION_TWEAK ${CMAKE_MATCH_1})
-
- math(EXPR THRUST_VERSION_MAJOR "${THRUST_VERSION_FLAT} / 100000")
- math(EXPR THRUST_VERSION_MINOR "(${THRUST_VERSION_FLAT} / 100) % 1000")
- math(EXPR THRUST_VERSION_PATCH "${THRUST_VERSION_FLAT} % 100") # Thrust: "subminor" CMake: "patch"
-
- # Build comparison versions:
- set(THRUST_COMPAT "${THRUST_VERSION_MAJOR}.${THRUST_VERSION_MINOR}.${THRUST_VERSION_PATCH}")
- set(THRUST_EXACT "${THRUST_COMPAT}.${THRUST_VERSION_TWEAK}")
- set(FIND_COMPAT "${PACKAGE_FIND_VERSION_MAJOR}.${PACKAGE_FIND_VERSION_MINOR}.${PACKAGE_FIND_VERSION_PATCH}")
- set(FIND_EXACT "${FIND_COMPAT}.${PACKAGE_FIND_VERSION_TWEAK}")
-
- # Set default results
- set(PACKAGE_VERSION ${THRUST_EXACT})
- set(PACKAGE_VERSION_UNSUITABLE FALSE)
- set(PACKAGE_VERSION_COMPATIBLE FALSE)
- set(PACKAGE_VERSION_EXACT FALSE)
-
- # Test for compatibility (ignores tweak)
- if (FIND_COMPAT VERSION_EQUAL THRUST_COMPAT)
-   set(PACKAGE_VERSION_COMPATIBLE TRUE)
- endif()
-
- # Test for exact (does not ignore tweak)
- if (FIND_EXACT VERSION_EQUAL THRUST_EXACT)
-   set(PACKAGE_VERSION_EXACT TRUE)
- endif()
spaces/CVPR/regionclip-demo/detectron2/modeling/text_encoder/hf_model.py DELETED
@@ -1,27 +0,0 @@
- import logging
-
- from transformers import AutoConfig
- from transformers import AutoModel
-
- from .registry import register_lang_encoder
-
- logger = logging.getLogger(__name__)
-
-
- @register_lang_encoder
- def lang_encoder(config_encoder, tokenizer, verbose, **kwargs):
-
-     hf_model = None
-     if config_encoder['LOAD_PRETRAINED']:
-         hf_model = AutoModel.from_pretrained(config_encoder['HF_MODEL'])
-     else:
-         hf_config = AutoConfig.from_pretrained(config_encoder['HF_MODEL'])
-
-         if 'CONFIG_OVERRIDE' in config_encoder:
-             logger.warning(f'Override config: {config_encoder["CONFIG_OVERRIDE"]}')
-             hf_config.update(config_encoder['CONFIG_OVERRIDE'])
-
-         logger.info(f'HF model config: {hf_config}')
-         hf_model = AutoModel.from_config(hf_config)
-
-     return hf_model
spaces/Chitranshu/Dashboard-Uber/Dockerfile DELETED
@@ -1,16 +0,0 @@
- FROM python:3.9
-
- WORKDIR /code
-
- COPY ./requirements.txt /code/requirements.txt
- RUN python3 -m pip install --no-cache-dir --upgrade pip
- RUN python3 -m pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
- COPY . .
-
- CMD ["panel", "serve", "/code/app.py", "--address", "0.0.0.0", "--port", "7860", "--allow-websocket-origin", "*"]
-
- RUN mkdir /.cache
- RUN chmod 777 /.cache
- RUN mkdir .chroma
- RUN chmod 777 .chroma
spaces/Chris4K/german-sentiment-bert/app.py DELETED
@@ -1,9 +0,0 @@
- import gradio as gr
-
-
- title = "My first Demo with Hugging Face"
- description = "This is a demo as a example"
-
-
-
- gr.Interface.load("models/oliverguhr/german-sentiment-bert").launch()
spaces/Chukwuka/Dog_Breed_ImageWoof/model.py DELETED
@@ -1,67 +0,0 @@
-
- import torch
- from torch import nn
- import torch.nn.functional as F
- import torchvision
- from utils import *
- from data_setup import classes
-
-
- class ImageClassificationBase(nn.Module):
-     def training_step(self, batch):
-         images, labels = batch
-         out = self(images)
-         # labels = labels.float().unsqueeze(1)
-         loss = F.cross_entropy(out, labels)
-         acc = accuracy(out, labels)
-         # print('training loss and acc:', loss, acc)
-         return loss, acc
-
-     def validation_step(self, batch):
-         images, labels = batch
-         out = self(images)
-         # labels = labels.float().unsqueeze(1)
-         loss = F.cross_entropy(out, labels)
-         acc = accuracy(out, labels)
-         # print('Validation loss and acc:', loss, acc)
-         return {'val_loss':loss.detach(), 'val_acc':acc}
-
-     def validation_end_epoch(self, results):
-         batch_loss = [x['val_loss'] for x in results]
-         epoch_loss = torch.stack(batch_loss).mean()
-         batch_acc = [x['val_acc'] for x in results]
-         epoch_acc = torch.stack(batch_acc).mean()
-         return {'val_loss':epoch_loss.item(), 'val_acc':epoch_acc.item()}
-
-     # def epoch_end(self, epoch, outputs):
-     #     print(f"Epoch {epoch+1}: train_loss: {outputs['train_loss']}, val_loss: {outputs['val_loss']}, val_acc: {outputs['val_acc']}")
-
-     def epoch_end(self, epoch, result):
-         print(f"Epoch {epoch+1}: train_loss: {result['train_losses']:.4f}, train_acc: {result['train_acc']:.4f}, \
-             val_loss: {result['val_loss']:.4f}, val_acc: {result['val_acc']:.4f} ")
-
-
- class Efficient_b2_model(ImageClassificationBase):
-     def __init__(self, num_classes=len(classes), pretrained=True):
-         super().__init__()
-         if pretrained:
-             if torchvision.__version__ >= '0.13.0':
-                 self.network = torchvision.models.efficientnet_b2(weights=torchvision.models.EfficientNet_B2_Weights.DEFAULT)
-
-             else:
-                 # 1. Get the base mdoel with pretrained weights and send to target device
-                 self.network = torchvision.models.efficientnet_b2(pretrained=True)
-
-             for param in self.network.parameters():
-                 param.requires_grad =False
-
-             self.network.classifier = nn.Sequential(nn.Dropout(p=0.3, inplace=True),
-                                                     nn.Linear(in_features=1408, out_features=num_classes, bias=True)
-                                                     )
-         else:
-             self.network = torchvision.models.efficientnet_b2()
-
-
-     def forward(self, x):
-         x = self.network(x)
-         return x
spaces/CoderMayhem/repello/app.py DELETED
@@ -1,146 +0,0 @@
- import streamlit as st
- import requests
- import json
- import time
- import mixpanel
- from mixpanel import Mixpanel
- from dotenv import load_dotenv
- import os
- import pandas as pd
- import random
- from google.cloud import firestore
-
- # Load environment variables from .env file
- load_dotenv()
-
- api_token = os.getenv("API_TOKEN")
- mp = Mixpanel(api_token)
-
- # Authenticate to Firestore with the JSON account key.
- # db = firestore.Client.from_service_account_info()
-
- # Function to make API request
- def make_api_request(prompt):
-     url = 'http://api.repelloai.com/repello'
-     headers = {'Content-Type': 'application/json'}
-     input = {"input" : prompt}
-     json_string = json.dumps(input, indent=2)
-     data = {
-         "data" : json_string
-     }
-     # Record the start time
-     start_time = time.time()
-
-     response = requests.post(url, json=data, headers=headers)
-
-     # Calculate the time taken
-     end_time = time.time()
-     time_taken = end_time - start_time
-
-     return response.json(), time_taken
-
- # Function to create a table for the result categories
- def display_result_table(results):
-     # Create a table with three columns
-     table_data = []
-     for model_result in results:
-         try:
-             threats = model_result.get("threats", {})
-             probabilities = model_result.get("probabilities", {})
-         except AttributeError:
-             st.error("Error retrieving threats and scores.")
-             continue
-
-         if isinstance(threats, dict) and isinstance(probabilities, dict):
-             for threat, probability in probabilities.items():
-                 emoji_flag = "🚨" if threats.get(threat, False) else "👌"
-                 true_or_false = str(threats.get(threat, False))
-                 table_data.append({"Threat": threat, "Detected?": true_or_false, "Probability": probability, "Verdict": emoji_flag})
-
-     # Display the table
-     if table_data:
-         st.table(table_data)
-     else:
-         st.text("No results to display.")
-
- # Function to get 4 random prompts from the CSV file
- def get_random_prompts():
-     csv_file_path = "bad_prompts.csv"
-     df = pd.read_csv(csv_file_path)
-     random_prompts = df.sample(4)["text"].tolist()
-     return random_prompts
-
- # Streamlit app layout
- def main():
-     #Track the event 'Page View'
-     mp.track('Page View', event_name='New Visitor')
-     # Set page layout
-     st.set_page_config(layout="wide")
-
-     # Initialize session state
-     if 'response' not in st.session_state:
-         st.session_state.response = None
-     if 'selected_prompt' not in st.session_state:
-         st.session_state.selected_prompt = ""
-     if 'button_texts' not in st.session_state:
-         st.session_state.button_texts = []
-     if 'hasSent' not in st.session_state:
-         st.session_state.prev_response = 0
-
-     # Big, bold heading with magical wand emoji
-     st.title("Repello 🪄 Playground")
-
-     # Input box for user prompts
-     prompt = st.text_area("Enter your prompt:", value=st.session_state.selected_prompt)
-
-     if st.button("Send"):
-         if prompt:
-             response, time_taken = make_api_request(prompt)
-             # Example: Track a custom event 'Button Click'
-             mp.track('Button Click', event_name='Api call')
-             st.session_state.response = response
-             st.session_state.time_taken = time_taken/10
-             st.session_state.hasSent = 1
-
-     # Display result table or JSON response below input box
-     st.header("Results:")
-     if st.session_state.response is not None:
-         results = st.session_state.response.get("responseData", {}).get("results", [])
-         if results:
-             display_result_table(results)
-
-             # Display time taken for the response
-             st.subheader("Time Taken for Response ⏱️")
-             st.write(f"The response took {st.session_state.time_taken:.4f} seconds.")
-
-             # Button to open Google Form
-             st.text("To report an issue write to : [email protected]")
-             if st.session_state.hasSent:
-                 # db.collection("prompts").add({"prompt": st.session_state.selected_prompt})
-                 st.session_state.hasSent = 0
-
-         else:
-             st.text("The detection results of your prompt will appear here.")
-     else:
-         st.text("The detection results of your prompt will appear here.")
-
-     # Left column with buttons
-     st.sidebar.title("Horcrux Prompts 🚫")
-     st.sidebar.write("**Try out these perilous prompts which have previously created havoc for LLMs and see if our spell works!**")
-
-     if len(st.session_state.button_texts)==0:
-         st.session_state.button_texts = get_random_prompts()
-
-     # Button to refresh prompts
-     if st.sidebar.button("Refresh Prompts 🔄"):
-         # Clear existing button_texts
-         st.session_state.button_texts = []
-         # Get new random prompts
-         st.session_state.button_texts = get_random_prompts()
-
-     for i, text in enumerate(st.session_state.button_texts, start=1):
-         if st.sidebar.button(text, key=f"button_{i}", on_click=lambda t=text: st.session_state.update(selected_prompt=t.strip())):
-             st.session_state.selected_prompt = text.strip()
-
- if __name__ == "__main__":
-     main()
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/engine/__init__.py DELETED
@@ -1 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_c_i_d_g.py DELETED
@@ -1,19 +0,0 @@
- # coding: utf-8
- from .otBase import BaseTTXConverter
-
-
- class table__c_i_d_g(BaseTTXConverter):
-     """The AAT ``cidg`` table has almost the same structure as ``gidc``,
-     just mapping CIDs to GlyphIDs instead of the reverse direction.
-
-     It is useful for fonts that may be used by a PDF renderer in lieu of
-     a font reference with a known glyph collection but no subsetted
-     glyphs. For instance, a PDF can say “please use a font conforming
-     to Adobe-Japan-1”; the ``cidg`` mapping is necessary if the font is,
-     say, a TrueType font. ``gidc`` is lossy for this purpose and is
-     obsoleted by ``cidg``.
-
-     For example, the first font in ``/System/Library/Fonts/PingFang.ttc``
-     (which Apple ships pre-installed on MacOS 10.12.6) has a ``cidg`` table."""
-
-     pass
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-ca25ec1d.js DELETED
@@ -1,6 +0,0 @@
- import{S as M,e as V,s as H,J as R,K as k,p as _,M as v,n as w,A as m,N as y,O as T,U as Z,u as B,v as b,y as C,z as d,P as h,R as A,m as L,G as E,V as Y,Q as q,k as O,o as N,x as S,ai as W,Z as X,$ as x,B as ee,E as te,ae as le,q as ne,r as ie}from"./index-3370be2a.js";import{f as se,B as oe}from"./Button-89624748.js";import{C as re,a as ce}from"./Copy-6cd42558.js";import{E as ae}from"./Empty-585389a4.js";import{B as fe}from"./BlockLabel-56db415e.js";import"./Blocks-f0129fcd.js";function ue(a){let e,t;return{c(){e=R("svg"),t=R("path"),k(t,"fill","currentColor"),k(t,"d","M5 3h2v2H5v5a2 2 0 0 1-2 2a2 2 0 0 1 2 2v5h2v2H5c-1.07-.27-2-.9-2-2v-4a2 2 0 0 0-2-2H0v-2h1a2 2 0 0 0 2-2V5a2 2 0 0 1 2-2m14 0a2 2 0 0 1 2 2v4a2 2 0 0 0 2 2h1v2h-1a2 2 0 0 0-2 2v4a2 2 0 0 1-2 2h-2v-2h2v-5a2 2 0 0 1 2-2a2 2 0 0 1-2-2V5h-2V3h2m-7 12a1 1 0 0 1 1 1a1 1 0 0 1-1 1a1 1 0 0 1-1-1a1 1 0 0 1 1-1m-4 0a1 1 0 0 1 1 1a1 1 0 0 1-1 1a1 1 0 0 1-1-1a1 1 0 0 1 1-1m8 0a1 1 0 0 1 1 1a1 1 0 0 1-1 1a1 1 0 0 1-1-1a1 1 0 0 1 1-1Z"),k(e,"xmlns","http://www.w3.org/2000/svg"),k(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),k(e,"aria-hidden","true"),k(e,"role","img"),k(e,"class","iconify iconify--mdi"),k(e,"width","100%"),k(e,"height","100%"),k(e,"preserveAspectRatio","xMidYMid meet"),k(e,"viewBox","0 0 24 24")},m(l,i){_(l,e,i),v(e,t)},p:w,i:w,o:w,d(l){l&&m(e)}}}let F=class extends M{constructor(e){super(),V(this,e,null,ue,H,{})}};function $(a,e,t){const l=a.slice();return l[5]=e[t],l[7]=t,l}function z(a,e,t){const l=a.slice();return l[5]=e[t],l[7]=t,l}function _e(a){let e,t;return{c(){e=y("div"),t=h(a[1]),k(e,"class","json-item svelte-1kspdo")},m(l,i){_(l,e,i),v(e,t)},p(l,i){i&2&&A(t,l[1])},i:w,o:w,d(l){l&&m(e)}}}function me(a){let e,t;return{c(){e=y("div"),t=h(a[1]),k(e,"class","json-item number svelte-1kspdo")},m(l,i){_(l,e,i),v(e,t)},p(l,i){i&2&&A(t,l[1])},i:w,o:w,d(l){l&&m(e)}}}function de(a){let e,t=a[1].toLocaleString()+"",l;return{c(){e=y("div"),l=h(t),k(e,"class","json-item bool svelte-1kspdo")},m(i,r){_(i,e,r),v(e,l)},p(i,r){r&2&&t!==(t=i[1].toLocaleString()+"")&&A(l,t)},i:w,o:w,d(i){i&&m(e)}}}function be(a){let e,t,l,i;return{c(){e=y("div"),t=h('"'),l=h(a[1]),i=h('"'),k(e,"class","json-item string svelte-1kspdo")},m(r,o){_(r,e,o),v(e,t),v(e,l),v(e,i)},p(r,o){o&2&&A(l,r[1])},i:w,o:w,d(r){r&&m(e)}}}function pe(a){let e;return{c(){e=y("div"),e.textContent="null",k(e,"class","json-item null svelte-1kspdo")},m(t,l){_(t,e,l)},p:w,i:w,o:w,d(t){t&&m(e)}}}function ke(a){let e,t,l,i;const r=[ge,ve],o=[];function f(n,s){return n[0]?0:1}return e=f(a),t=o[e]=r[e](a),{c(){t.c(),l=L()},m(n,s){o[e].m(n,s),_(n,l,s),i=!0},p(n,s){let c=e;e=f(n),e===c?o[e].p(n,s):(B(),b(o[c],1,1,()=>{o[c]=null}),C(),t=o[e],t?t.p(n,s):(t=o[e]=r[e](n),t.c()),d(t,1),t.m(l.parentNode,l))},i(n){i||(d(t),i=!0)},o(n){b(t),i=!1},d(n){n&&m(l),o[e].d(n)}}}function he(a){let e,t,l,i;const r=[ye,we],o=[];function f(n,s){return n[0]?0:1}return e=f(a),t=o[e]=r[e](a),{c(){t.c(),l=L()},m(n,s){o[e].m(n,s),_(n,l,s),i=!0},p(n,s){let c=e;e=f(n),e===c?o[e].p(n,s):(B(),b(o[c],1,1,()=>{o[c]=null}),C(),t=o[e],t?t.p(n,s):(t=o[e]=r[e](n),t.c()),d(t,1),t.m(l.parentNode,l))},i(n){i||(d(t),i=!0)},o(n){b(t),i=!1},d(n){n&&m(l),o[e].d(n)}}}function ve(a){let e,t,l,i,r=E(Object.entries(a[1])),o=[];for(let n=0;n<r.length;n+=1)o[n]=I($(a,r,n));const f=n=>b(o[n],1,1,()=>{o[n]=null});return{c(){e=h(`{
- `),t=y("div");for(let n=0;n<o.length;n+=1)o[n].c();l=h(`
- }`),k(t,"class","children svelte-1kspdo")},m(n,s){_(n,e,s),_(n,t,s);for(let c=0;c<o.length;c+=1)o[c]&&o[c].m(t,null);_(n,l,s),i=!0},p(n,s){if(s&6){r=E(Object.entries(n[1]));let c;for(c=0;c<r.length;c+=1){const u=$(n,r,c);o[c]?(o[c].p(u,s),d(o[c],1)):(o[c]=I(u),o[c].c(),d(o[c],1),o[c].m(t,null))}for(B(),c=r.length;c<o.length;c+=1)f(c);C()}},i(n){if(!i){for(let s=0;s<r.length;s+=1)d(o[s]);i=!0}},o(n){o=o.filter(Boolean);for(let s=0;s<o.length;s+=1)b(o[s]);i=!1},d(n){n&&(m(e),m(t),m(l)),Y(o,n)}}}function ge(a){let e,t,l=Object.keys(a[1]).length+"",i,r,o,f;return{c(){e=y("button"),t=h("{+"),i=h(l),r=h(" items}")},m(n,s){_(n,e,s),v(e,t),v(e,i),v(e,r),o||(f=q(e,"click",a[4]),o=!0)},p(n,s){s&2&&l!==(l=Object.keys(n[1]).length+"")&&A(i,l)},i:w,o:w,d(n){n&&m(e),o=!1,f()}}}function G(a){let e;return{c(){e=h(",")},m(t,l){_(t,e,l)},d(t){t&&m(e)}}}function I(a){let e,t=a[5][0]+"",l,i,r,o=a[7]!==Object.keys(a[1]).length-1,f,n;r=new D({props:{value:a[5][1],depth:a[2]+1,key:a[7]}});let s=o&&G();return{c(){e=y("div"),l=h(t),i=h(": "),O(r.$$.fragment),s&&s.c(),f=T()},m(c,u){_(c,e,u),v(e,l),v(e,i),N(r,e,null),s&&s.m(e,null),v(e,f),n=!0},p(c,u){(!n||u&2)&&t!==(t=c[5][0]+"")&&A(l,t);const j={};u&2&&(j.value=c[5][1]),u&4&&(j.depth=c[2]+1),r.$set(j),u&2&&(o=c[7]!==Object.keys(c[1]).length-1),o?s||(s=G(),s.c(),s.m(e,f)):s&&(s.d(1),s=null)},i(c){n||(d(r.$$.fragment,c),n=!0)},o(c){b(r.$$.fragment,c),n=!1},d(c){c&&m(e),S(r),s&&s.d()}}}function we(a){let e,t,l,i,r=E(a[1]),o=[];for(let n=0;n<r.length;n+=1)o[n]=Q(z(a,r,n));const f=n=>b(o[n],1,1,()=>{o[n]=null});return{c(){e=h(`[
- `),t=y("div");for(let n=0;n<o.length;n+=1)o[n].c();l=h(`
- ]`),k(t,"class","children svelte-1kspdo")},m(n,s){_(n,e,s),_(n,t,s);for(let c=0;c<o.length;c+=1)o[c]&&o[c].m(t,null);_(n,l,s),i=!0},p(n,s){if(s&6){r=E(n[1]);let c;for(c=0;c<r.length;c+=1){const u=z(n,r,c);o[c]?(o[c].p(u,s),d(o[c],1)):(o[c]=Q(u),o[c].c(),d(o[c],1),o[c].m(t,null))}for(B(),c=r.length;c<o.length;c+=1)f(c);C()}},i(n){if(!i){for(let s=0;s<r.length;s+=1)d(o[s]);i=!0}},o(n){o=o.filter(Boolean);for(let s=0;s<o.length;s+=1)b(o[s]);i=!1},d(n){n&&(m(e),m(t),m(l)),Y(o,n)}}}function ye(a){let e,t,l,i=a[1].length+"",r,o,f,n;return{c(){e=y("button"),t=y("span"),l=h("expand "),r=h(i),o=h(" children"),k(t,"class","expand-array svelte-1kspdo")},m(s,c){_(s,e,c),v(e,t),v(t,l),v(t,r),v(t,o),f||(n=q(e,"click",a[3]),f=!0)},p(s,c){c&2&&i!==(i=s[1].length+"")&&A(r,i)},i:w,o:w,d(s){s&&m(e),f=!1,n()}}}function K(a){let e;return{c(){e=h(",")},m(t,l){_(t,e,l)},d(t){t&&m(e)}}}function Q(a){let e,t,l,i,r,o,f;i=new D({props:{value:a[5],depth:a[2]+1}});let n=a[7]!==a[1].length-1&&K();return{c(){e=y("div"),t=h(a[7]),l=h(": "),O(i.$$.fragment),r=T(),n&&n.c(),o=T()},m(s,c){_(s,e,c),v(e,t),v(e,l),N(i,e,null),v(e,r),n&&n.m(e,null),v(e,o),f=!0},p(s,c){const u={};c&2&&(u.value=s[5]),c&4&&(u.depth=s[2]+1),i.$set(u),s[7]!==s[1].length-1?n||(n=K(),n.c(),n.m(e,o)):n&&(n.d(1),n=null)},i(s){f||(d(i.$$.fragment,s),f=!0)},o(s){b(i.$$.fragment,s),f=!1},d(s){s&&m(e),S(i),n&&n.d()}}}function je(a){let e,t,l,i,r,o;const f=[he,ke,pe,be,de,me,_e],n=[];function s(c,u){return c[1]instanceof Array?0:c[1]instanceof Object?1:c[1]===null?2:typeof c[1]=="string"?3:typeof c[1]=="boolean"?4:typeof c[1]=="number"?5:6}return i=s(a),r=n[i]=f[i](a),{c(){e=y("span"),t=T(),l=y("div"),r.c(),k(e,"class","spacer svelte-1kspdo"),Z(e,"mt-10",a[2]===0),k(l,"class","json-node svelte-1kspdo")},m(c,u){_(c,e,u),_(c,t,u),_(c,l,u),n[i].m(l,null),o=!0},p(c,[u]){(!o||u&4)&&Z(e,"mt-10",c[2]===0);let j=i;i=s(c),i===j?n[i].p(c,u):(B(),b(n[j],1,1,()=>{n[j]=null}),C(),r=n[i],r?r.p(c,u):(r=n[i]=f[i](c),r.c()),d(r,1),r.m(l,null))},i(c){o||(d(r),o=!0)},o(c){b(r),o=!1},d(c){c&&(m(e),m(t),m(l)),n[i].d()}}}function Oe(a,e,t){let{value:l}=e,{depth:i}=e,{collapsed:r=i>4}=e;const o=()=>{t(0,r=!1)},f=()=>{t(0,r=!1)};return a.$$set=n=>{"value"in n&&t(1,l=n.value),"depth"in n&&t(2,i=n.depth),"collapsed"in n&&t(0,r=n.collapsed)},[r,l,i,o,f]}class D extends M{constructor(e){super(),V(this,e,Oe,je,H,{value:1,depth:2,collapsed:0})}}function Ne(a){let e,t;return e=new ae({props:{$$slots:{default:[Je]},$$scope:{ctx:a}}}),{c(){O(e.$$.fragment)},m(l,i){N(e,l,i),t=!0},p(l,i){const r={};i&32&&(r.$$scope={dirty:i,ctx:l}),e.$set(r)},i(l){t||(d(e.$$.fragment,l),t=!0)},o(l){b(e.$$.fragment,l),t=!1},d(l){S(e,l)}}}function Se(a){let e,t,l,i,r,o,f,n,s;const c=[Ce,Be],u=[];function j(g,J){return g[1]?0:1}return t=j(a),l=u[t]=c[t](a),o=new D({props:{value:a[0],depth:0}}),{c(){e=y("button"),l.c(),i=T(),r=y("div"),O(o.$$.fragment),k(e,"class","svelte-1trjy9a"),k(r,"class","json-holder svelte-1trjy9a")},m(g,J){_(g,e,J),u[t].m(e,null),_(g,i,J),_(g,r,J),N(o,r,null),f=!0,n||(s=q(e,"click",a[2]),n=!0)},p(g,J){let p=t;t=j(g),t!==p&&(B(),b(u[p],1,1,()=>{u[p]=null}),C(),l=u[t],l||(l=u[t]=c[t](g),l.c()),d(l,1),l.m(e,null));const P={};J&1&&(P.value=g[0]),o.$set(P)},i(g){f||(d(l),d(o.$$.fragment,g),f=!0)},o(g){b(l),b(o.$$.fragment,g),f=!1},d(g){g&&(m(e),m(i),m(r)),u[t].d(),S(o),n=!1,s()}}}function Je(a){let e,t;return e=new F({}),{c(){O(e.$$.fragment)},m(l,i){N(e,l,i),t=!0},i(l){t||(d(e.$$.fragment,l),t=!0)},o(l){b(e.$$.fragment,l),t=!1},d(l){S(e,l)}}}function Be(a){let e,t,l;return t=new 
re({}),{c(){e=y("span"),O(t.$$.fragment),k(e,"class","copy-text")},m(i,r){_(i,e,r),N(t,e,null),l=!0},i(i){l||(d(t.$$.fragment,i),l=!0)},o(i){b(t.$$.fragment,i),l=!1},d(i){i&&m(e),S(t)}}}function Ce(a){let e,t,l,i;return t=new ce({}),{c(){e=y("span"),O(t.$$.fragment)},m(r,o){_(r,e,o),N(t,e,null),i=!0},i(r){i||(d(t.$$.fragment,r),r&&(l||X(()=>{l=x(e,se,{duration:300}),l.start()})),i=!0)},o(r){b(t.$$.fragment,r),i=!1},d(r){r&&m(e),S(t)}}}function Te(a){let e,t,l,i,r;const o=[Se,Ne],f=[];function n(s,c){return c&1&&(e=null),e==null&&(e=!!(s[0]&&s[0]!=='""'&&!Ae(s[0]))),e?0:1}return t=n(a,-1),l=f[t]=o[t](a),{c(){l.c(),i=L()},m(s,c){f[t].m(s,c),_(s,i,c),r=!0},p(s,[c]){let u=t;t=n(s,c),t===u?f[t].p(s,c):(B(),b(f[u],1,1,()=>{f[u]=null}),C(),l=f[t],l?l.p(s,c):(l=f[t]=o[t](s),l.c()),d(l,1),l.m(i.parentNode,i))},i(s){r||(d(l),r=!0)},o(s){b(l),r=!1},d(s){s&&m(i),f[t].d(s)}}}function Ae(a){return a&&Object.keys(a).length===0&&Object.getPrototypeOf(a)===Object.prototype}function Ee(a,e,t){let{value:l={}}=e,i=!1,r;function o(){t(1,i=!0),r&&clearTimeout(r),r=setTimeout(()=>{t(1,i=!1)},1e3)}async function f(){"clipboard"in navigator&&(await navigator.clipboard.writeText(JSON.stringify(l,null,2)),o())}return W(()=>{r&&clearTimeout(r)}),a.$$set=n=>{"value"in n&&t(0,l=n.value)},[l,i,f]}class Me extends M{constructor(e){super(),V(this,e,Ee,Te,H,{value:0})}}function U(a){let e,t;return e=new fe({props:{Icon:F,show_label:a[6],label:a[5],float:!1,disable:a[7]===!1}}),{c(){O(e.$$.fragment)},m(l,i){N(e,l,i),t=!0},p(l,i){const r={};i&64&&(r.show_label=l[6]),i&32&&(r.label=l[5]),i&128&&(r.disable=l[7]===!1),e.$set(r)},i(l){t||(d(e.$$.fragment,l),t=!0)},o(l){b(e.$$.fragment,l),t=!1},d(l){S(e,l)}}}function Ve(a){let e,t,l,i,r,o=a[5]&&U(a);const f=[a[4]];let n={};for(let s=0;s<f.length;s+=1)n=te(n,f[s]);return t=new le({props:n}),i=new Me({props:{value:a[3]}}),{c(){o&&o.c(),e=T(),O(t.$$.fragment),l=T(),O(i.$$.fragment)},m(s,c){o&&o.m(s,c),_(s,e,c),N(t,s,c),_(s,l,c),N(i,s,c),r=!0},p(s,c){s[5]?o?(o.p(s,c),c&32&&d(o,1)):(o=U(s),o.c(),d(o,1),o.m(e.parentNode,e)):o&&(B(),b(o,1,1,()=>{o=null}),C());const u=c&16?ne(f,[ie(s[4])]):{};t.$set(u);const j={};c&8&&(j.value=s[3]),i.$set(j)},i(s){r||(d(o),d(t.$$.fragment,s),d(i.$$.fragment,s),r=!0)},o(s){b(o),b(t.$$.fragment,s),b(i.$$.fragment,s),r=!1},d(s){s&&(m(e),m(l)),o&&o.d(s),S(t,s),S(i,s)}}}function He(a){let e,t;return e=new oe({props:{visible:a[2],test_id:"json",elem_id:a[0],elem_classes:a[1],container:a[7],scale:a[8],min_width:a[9],padding:!1,$$slots:{default:[Ve]},$$scope:{ctx:a}}}),{c(){O(e.$$.fragment)},m(l,i){N(e,l,i),t=!0},p(l,[i]){const r={};i&4&&(r.visible=l[2]),i&1&&(r.elem_id=l[0]),i&2&&(r.elem_classes=l[1]),i&128&&(r.container=l[7]),i&256&&(r.scale=l[8]),i&512&&(r.min_width=l[9]),i&4344&&(r.$$scope={dirty:i,ctx:l}),e.$set(r)},i(l){t||(d(e.$$.fragment,l),t=!0)},o(l){b(e.$$.fragment,l),t=!1},d(l){S(e,l)}}}function Le(a,e,t){let{elem_id:l=""}=e,{elem_classes:i=[]}=e,{visible:r=!0}=e,{value:o}=e,f,{loading_status:n}=e,{label:s}=e,{show_label:c}=e,{container:u=!0}=e,{scale:j=null}=e,{min_width:g=void 0}=e;const J=ee();return a.$$set=p=>{"elem_id"in p&&t(0,l=p.elem_id),"elem_classes"in p&&t(1,i=p.elem_classes),"visible"in p&&t(2,r=p.visible),"value"in p&&t(3,o=p.value),"loading_status"in p&&t(4,n=p.loading_status),"label"in p&&t(5,s=p.label),"show_label"in p&&t(6,c=p.show_label),"container"in p&&t(7,u=p.container),"scale"in p&&t(8,j=p.scale),"min_width"in p&&t(9,g=p.min_width)},a.$$.update=()=>{a.$$.dirty&1032&&o!==f&&(t(10,f=o),J("change"))},[l,i,r,o,n,s,c,u,j,g,f]}class 
qe extends M{constructor(e){super(),V(this,e,Le,He,H,{elem_id:0,elem_classes:1,visible:2,value:3,loading_status:4,label:5,show_label:6,container:7,scale:8,min_width:9})}}const Ie=qe,Ke=["static"],Qe=a=>({type:{payload:"Object | Array"},description:{payload:"JSON object"}});export{Ie as Component,Qe as document,Ke as modes};
- //# sourceMappingURL=index-ca25ec1d.js.map
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/wrapper-6f348d45-38be7a64.js DELETED
@@ -1,8 +0,0 @@
- import S from"./__vite-browser-external-b25bb000.js";function z(s){return s&&s.__esModule&&Object.prototype.hasOwnProperty.call(s,"default")?s.default:s}function gt(s){if(s.__esModule)return s;var e=s.default;if(typeof e=="function"){var t=function r(){if(this instanceof r){var i=[null];i.push.apply(i,arguments);var n=Function.bind.apply(e,i);return new n}return e.apply(this,arguments)};t.prototype=e.prototype}else t={};return Object.defineProperty(t,"__esModule",{value:!0}),Object.keys(s).forEach(function(r){var i=Object.getOwnPropertyDescriptor(s,r);Object.defineProperty(t,r,i.get?i:{enumerable:!0,get:function(){return s[r]}})}),t}const{Duplex:yt}=S;function Oe(s){s.emit("close")}function vt(){!this.destroyed&&this._writableState.finished&&this.destroy()}function Qe(s){this.removeListener("error",Qe),this.destroy(),this.listenerCount("error")===0&&this.emit("error",s)}function St(s,e){let t=!0;const r=new yt({...e,autoDestroy:!1,emitClose:!1,objectMode:!1,writableObjectMode:!1});return s.on("message",function(n,o){const l=!o&&r._readableState.objectMode?n.toString():n;r.push(l)||s.pause()}),s.once("error",function(n){r.destroyed||(t=!1,r.destroy(n))}),s.once("close",function(){r.destroyed||r.push(null)}),r._destroy=function(i,n){if(s.readyState===s.CLOSED){n(i),process.nextTick(Oe,r);return}let o=!1;s.once("error",function(f){o=!0,n(f)}),s.once("close",function(){o||n(i),process.nextTick(Oe,r)}),t&&s.terminate()},r._final=function(i){if(s.readyState===s.CONNECTING){s.once("open",function(){r._final(i)});return}s._socket!==null&&(s._socket._writableState.finished?(i(),r._readableState.endEmitted&&r.destroy()):(s._socket.once("finish",function(){i()}),s.close()))},r._read=function(){s.isPaused&&s.resume()},r._write=function(i,n,o){if(s.readyState===s.CONNECTING){s.once("open",function(){r._write(i,n,o)});return}s.send(i,o)},r.on("end",vt),r.on("error",Qe),r}var Et=St;const Vs=z(Et);var te={exports:{}},U={BINARY_TYPES:["nodebuffer","arraybuffer","fragments"],EMPTY_BUFFER:Buffer.alloc(0),GUID:"258EAFA5-E914-47DA-95CA-C5AB0DC85B11",kForOnEventAttribute:Symbol("kIsForOnEventAttribute"),kListener:Symbol("kListener"),kStatusCode:Symbol("status-code"),kWebSocket:Symbol("websocket"),NOOP:()=>{}},bt,xt;const{EMPTY_BUFFER:kt}=U,Se=Buffer[Symbol.species];function wt(s,e){if(s.length===0)return kt;if(s.length===1)return s[0];const t=Buffer.allocUnsafe(e);let r=0;for(let i=0;i<s.length;i++){const n=s[i];t.set(n,r),r+=n.length}return r<e?new Se(t.buffer,t.byteOffset,r):t}function Je(s,e,t,r,i){for(let n=0;n<i;n++)t[r+n]=s[n]^e[n&3]}function et(s,e){for(let t=0;t<s.length;t++)s[t]^=e[t&3]}function Ot(s){return s.length===s.buffer.byteLength?s.buffer:s.buffer.slice(s.byteOffset,s.byteOffset+s.length)}function Ee(s){if(Ee.readOnly=!0,Buffer.isBuffer(s))return s;let e;return s instanceof ArrayBuffer?e=new Se(s):ArrayBuffer.isView(s)?e=new Se(s.buffer,s.byteOffset,s.byteLength):(e=Buffer.from(s),Ee.readOnly=!1),e}te.exports={concat:wt,mask:Je,toArrayBuffer:Ot,toBuffer:Ee,unmask:et};if(!{}.WS_NO_BUFFER_UTIL)try{const s=require("bufferutil");xt=te.exports.mask=function(e,t,r,i,n){n<48?Je(e,t,r,i,n):s.mask(e,t,r,i,n)},bt=te.exports.unmask=function(e,t){e.length<32?et(e,t):s.unmask(e,t)}}catch{}var ne=te.exports;const Ce=Symbol("kDone"),ue=Symbol("kRun");let Ct=class{constructor(e){this[Ce]=()=>{this.pending--,this[ue]()},this.concurrency=e||1/0,this.jobs=[],this.pending=0}add(e){this.jobs.push(e),this[ue]()}[ue](){if(this.pending!==this.concurrency&&this.jobs.length){const 
e=this.jobs.shift();this.pending++,e(this[Ce])}}};var Tt=Ct;const W=S,Te=ne,Lt=Tt,{kStatusCode:tt}=U,Nt=Buffer[Symbol.species],Pt=Buffer.from([0,0,255,255]),se=Symbol("permessage-deflate"),w=Symbol("total-length"),V=Symbol("callback"),C=Symbol("buffers"),J=Symbol("error");let K,Rt=class{constructor(e,t,r){if(this._maxPayload=r|0,this._options=e||{},this._threshold=this._options.threshold!==void 0?this._options.threshold:1024,this._isServer=!!t,this._deflate=null,this._inflate=null,this.params=null,!K){const i=this._options.concurrencyLimit!==void 0?this._options.concurrencyLimit:10;K=new Lt(i)}}static get extensionName(){return"permessage-deflate"}offer(){const e={};return this._options.serverNoContextTakeover&&(e.server_no_context_takeover=!0),this._options.clientNoContextTakeover&&(e.client_no_context_takeover=!0),this._options.serverMaxWindowBits&&(e.server_max_window_bits=this._options.serverMaxWindowBits),this._options.clientMaxWindowBits?e.client_max_window_bits=this._options.clientMaxWindowBits:this._options.clientMaxWindowBits==null&&(e.client_max_window_bits=!0),e}accept(e){return e=this.normalizeParams(e),this.params=this._isServer?this.acceptAsServer(e):this.acceptAsClient(e),this.params}cleanup(){if(this._inflate&&(this._inflate.close(),this._inflate=null),this._deflate){const e=this._deflate[V];this._deflate.close(),this._deflate=null,e&&e(new Error("The deflate stream was closed while data was being processed"))}}acceptAsServer(e){const t=this._options,r=e.find(i=>!(t.serverNoContextTakeover===!1&&i.server_no_context_takeover||i.server_max_window_bits&&(t.serverMaxWindowBits===!1||typeof t.serverMaxWindowBits=="number"&&t.serverMaxWindowBits>i.server_max_window_bits)||typeof t.clientMaxWindowBits=="number"&&!i.client_max_window_bits));if(!r)throw new Error("None of the extension offers can be accepted");return t.serverNoContextTakeover&&(r.server_no_context_takeover=!0),t.clientNoContextTakeover&&(r.client_no_context_takeover=!0),typeof t.serverMaxWindowBits=="number"&&(r.server_max_window_bits=t.serverMaxWindowBits),typeof t.clientMaxWindowBits=="number"?r.client_max_window_bits=t.clientMaxWindowBits:(r.client_max_window_bits===!0||t.clientMaxWindowBits===!1)&&delete r.client_max_window_bits,r}acceptAsClient(e){const t=e[0];if(this._options.clientNoContextTakeover===!1&&t.client_no_context_takeover)throw new Error('Unexpected parameter "client_no_context_takeover"');if(!t.client_max_window_bits)typeof this._options.clientMaxWindowBits=="number"&&(t.client_max_window_bits=this._options.clientMaxWindowBits);else if(this._options.clientMaxWindowBits===!1||typeof this._options.clientMaxWindowBits=="number"&&t.client_max_window_bits>this._options.clientMaxWindowBits)throw new Error('Unexpected or invalid parameter "client_max_window_bits"');return t}normalizeParams(e){return e.forEach(t=>{Object.keys(t).forEach(r=>{let i=t[r];if(i.length>1)throw new Error(`Parameter "${r}" must have only a single value`);if(i=i[0],r==="client_max_window_bits"){if(i!==!0){const n=+i;if(!Number.isInteger(n)||n<8||n>15)throw new TypeError(`Invalid value for parameter "${r}": ${i}`);i=n}else if(!this._isServer)throw new TypeError(`Invalid value for parameter "${r}": ${i}`)}else if(r==="server_max_window_bits"){const n=+i;if(!Number.isInteger(n)||n<8||n>15)throw new TypeError(`Invalid value for parameter "${r}": ${i}`);i=n}else if(r==="client_no_context_takeover"||r==="server_no_context_takeover"){if(i!==!0)throw new TypeError(`Invalid value for parameter "${r}": ${i}`)}else throw new Error(`Unknown 
parameter "${r}"`);t[r]=i})}),e}decompress(e,t,r){K.add(i=>{this._decompress(e,t,(n,o)=>{i(),r(n,o)})})}compress(e,t,r){K.add(i=>{this._compress(e,t,(n,o)=>{i(),r(n,o)})})}_decompress(e,t,r){const i=this._isServer?"client":"server";if(!this._inflate){const n=`${i}_max_window_bits`,o=typeof this.params[n]!="number"?W.Z_DEFAULT_WINDOWBITS:this.params[n];this._inflate=W.createInflateRaw({...this._options.zlibInflateOptions,windowBits:o}),this._inflate[se]=this,this._inflate[w]=0,this._inflate[C]=[],this._inflate.on("error",Bt),this._inflate.on("data",st)}this._inflate[V]=r,this._inflate.write(e),t&&this._inflate.write(Pt),this._inflate.flush(()=>{const n=this._inflate[J];if(n){this._inflate.close(),this._inflate=null,r(n);return}const o=Te.concat(this._inflate[C],this._inflate[w]);this._inflate._readableState.endEmitted?(this._inflate.close(),this._inflate=null):(this._inflate[w]=0,this._inflate[C]=[],t&&this.params[`${i}_no_context_takeover`]&&this._inflate.reset()),r(null,o)})}_compress(e,t,r){const i=this._isServer?"server":"client";if(!this._deflate){const n=`${i}_max_window_bits`,o=typeof this.params[n]!="number"?W.Z_DEFAULT_WINDOWBITS:this.params[n];this._deflate=W.createDeflateRaw({...this._options.zlibDeflateOptions,windowBits:o}),this._deflate[w]=0,this._deflate[C]=[],this._deflate.on("data",Ut)}this._deflate[V]=r,this._deflate.write(e),this._deflate.flush(W.Z_SYNC_FLUSH,()=>{if(!this._deflate)return;let n=Te.concat(this._deflate[C],this._deflate[w]);t&&(n=new Nt(n.buffer,n.byteOffset,n.length-4)),this._deflate[V]=null,this._deflate[w]=0,this._deflate[C]=[],t&&this.params[`${i}_no_context_takeover`]&&this._deflate.reset(),r(null,n)})}};var oe=Rt;function Ut(s){this[C].push(s),this[w]+=s.length}function st(s){if(this[w]+=s.length,this[se]._maxPayload<1||this[w]<=this[se]._maxPayload){this[C].push(s);return}this[J]=new RangeError("Max payload size exceeded"),this[J].code="WS_ERR_UNSUPPORTED_MESSAGE_LENGTH",this[J][tt]=1009,this.removeListener("data",st),this.reset()}function Bt(s){this[se]._inflate=null,s[tt]=1007,this[V](s)}var re={exports:{}};const $t={},Mt=Object.freeze(Object.defineProperty({__proto__:null,default:$t},Symbol.toStringTag,{value:"Module"})),It=gt(Mt);var Le;const{isUtf8:Ne}=S,Dt=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,1,1,1,0,0,1,1,0,1,1,0,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,0];function Wt(s){return s>=1e3&&s<=1014&&s!==1004&&s!==1005&&s!==1006||s>=3e3&&s<=4999}function be(s){const e=s.length;let t=0;for(;t<e;)if(!(s[t]&128))t++;else if((s[t]&224)===192){if(t+1===e||(s[t+1]&192)!==128||(s[t]&254)===192)return!1;t+=2}else if((s[t]&240)===224){if(t+2>=e||(s[t+1]&192)!==128||(s[t+2]&192)!==128||s[t]===224&&(s[t+1]&224)===128||s[t]===237&&(s[t+1]&224)===160)return!1;t+=3}else if((s[t]&248)===240){if(t+3>=e||(s[t+1]&192)!==128||(s[t+2]&192)!==128||(s[t+3]&192)!==128||s[t]===240&&(s[t+1]&240)===128||s[t]===244&&s[t+1]>143||s[t]>244)return!1;t+=4}else return!1;return!0}re.exports={isValidStatusCode:Wt,isValidUTF8:be,tokenChars:Dt};if(Ne)Le=re.exports.isValidUTF8=function(s){return s.length<24?be(s):Ne(s)};else if(!{}.WS_NO_UTF_8_VALIDATE)try{const s=It;Le=re.exports.isValidUTF8=function(e){return e.length<32?be(e):s(e)}}catch{}var 
ae=re.exports;const{Writable:At}=S,Pe=oe,{BINARY_TYPES:Ft,EMPTY_BUFFER:Re,kStatusCode:jt,kWebSocket:Gt}=U,{concat:de,toArrayBuffer:Vt,unmask:Ht}=ne,{isValidStatusCode:zt,isValidUTF8:Ue}=ae,X=Buffer[Symbol.species],A=0,Be=1,$e=2,Me=3,_e=4,Yt=5;let qt=class extends At{constructor(e={}){super(),this._binaryType=e.binaryType||Ft[0],this._extensions=e.extensions||{},this._isServer=!!e.isServer,this._maxPayload=e.maxPayload|0,this._skipUTF8Validation=!!e.skipUTF8Validation,this[Gt]=void 0,this._bufferedBytes=0,this._buffers=[],this._compressed=!1,this._payloadLength=0,this._mask=void 0,this._fragmented=0,this._masked=!1,this._fin=!1,this._opcode=0,this._totalPayloadLength=0,this._messageLength=0,this._fragments=[],this._state=A,this._loop=!1}_write(e,t,r){if(this._opcode===8&&this._state==A)return r();this._bufferedBytes+=e.length,this._buffers.push(e),this.startLoop(r)}consume(e){if(this._bufferedBytes-=e,e===this._buffers[0].length)return this._buffers.shift();if(e<this._buffers[0].length){const r=this._buffers[0];return this._buffers[0]=new X(r.buffer,r.byteOffset+e,r.length-e),new X(r.buffer,r.byteOffset,e)}const t=Buffer.allocUnsafe(e);do{const r=this._buffers[0],i=t.length-e;e>=r.length?t.set(this._buffers.shift(),i):(t.set(new Uint8Array(r.buffer,r.byteOffset,e),i),this._buffers[0]=new X(r.buffer,r.byteOffset+e,r.length-e)),e-=r.length}while(e>0);return t}startLoop(e){let t;this._loop=!0;do switch(this._state){case A:t=this.getInfo();break;case Be:t=this.getPayloadLength16();break;case $e:t=this.getPayloadLength64();break;case Me:this.getMask();break;case _e:t=this.getData(e);break;default:this._loop=!1;return}while(this._loop);e(t)}getInfo(){if(this._bufferedBytes<2){this._loop=!1;return}const e=this.consume(2);if(e[0]&48)return this._loop=!1,g(RangeError,"RSV2 and RSV3 must be clear",!0,1002,"WS_ERR_UNEXPECTED_RSV_2_3");const t=(e[0]&64)===64;if(t&&!this._extensions[Pe.extensionName])return this._loop=!1,g(RangeError,"RSV1 must be clear",!0,1002,"WS_ERR_UNEXPECTED_RSV_1");if(this._fin=(e[0]&128)===128,this._opcode=e[0]&15,this._payloadLength=e[1]&127,this._opcode===0){if(t)return this._loop=!1,g(RangeError,"RSV1 must be clear",!0,1002,"WS_ERR_UNEXPECTED_RSV_1");if(!this._fragmented)return this._loop=!1,g(RangeError,"invalid opcode 0",!0,1002,"WS_ERR_INVALID_OPCODE");this._opcode=this._fragmented}else if(this._opcode===1||this._opcode===2){if(this._fragmented)return this._loop=!1,g(RangeError,`invalid opcode ${this._opcode}`,!0,1002,"WS_ERR_INVALID_OPCODE");this._compressed=t}else if(this._opcode>7&&this._opcode<11){if(!this._fin)return this._loop=!1,g(RangeError,"FIN must be set",!0,1002,"WS_ERR_EXPECTED_FIN");if(t)return this._loop=!1,g(RangeError,"RSV1 must be clear",!0,1002,"WS_ERR_UNEXPECTED_RSV_1");if(this._payloadLength>125||this._opcode===8&&this._payloadLength===1)return this._loop=!1,g(RangeError,`invalid payload length ${this._payloadLength}`,!0,1002,"WS_ERR_INVALID_CONTROL_PAYLOAD_LENGTH")}else return this._loop=!1,g(RangeError,`invalid opcode ${this._opcode}`,!0,1002,"WS_ERR_INVALID_OPCODE");if(!this._fin&&!this._fragmented&&(this._fragmented=this._opcode),this._masked=(e[1]&128)===128,this._isServer){if(!this._masked)return this._loop=!1,g(RangeError,"MASK must be set",!0,1002,"WS_ERR_EXPECTED_MASK")}else if(this._masked)return this._loop=!1,g(RangeError,"MASK must be clear",!0,1002,"WS_ERR_UNEXPECTED_MASK");if(this._payloadLength===126)this._state=Be;else if(this._payloadLength===127)this._state=$e;else return 
this.haveLength()}getPayloadLength16(){if(this._bufferedBytes<2){this._loop=!1;return}return this._payloadLength=this.consume(2).readUInt16BE(0),this.haveLength()}getPayloadLength64(){if(this._bufferedBytes<8){this._loop=!1;return}const e=this.consume(8),t=e.readUInt32BE(0);return t>Math.pow(2,53-32)-1?(this._loop=!1,g(RangeError,"Unsupported WebSocket frame: payload length > 2^53 - 1",!1,1009,"WS_ERR_UNSUPPORTED_DATA_PAYLOAD_LENGTH")):(this._payloadLength=t*Math.pow(2,32)+e.readUInt32BE(4),this.haveLength())}haveLength(){if(this._payloadLength&&this._opcode<8&&(this._totalPayloadLength+=this._payloadLength,this._totalPayloadLength>this._maxPayload&&this._maxPayload>0))return this._loop=!1,g(RangeError,"Max payload size exceeded",!1,1009,"WS_ERR_UNSUPPORTED_MESSAGE_LENGTH");this._masked?this._state=Me:this._state=_e}getMask(){if(this._bufferedBytes<4){this._loop=!1;return}this._mask=this.consume(4),this._state=_e}getData(e){let t=Re;if(this._payloadLength){if(this._bufferedBytes<this._payloadLength){this._loop=!1;return}t=this.consume(this._payloadLength),this._masked&&this._mask[0]|this._mask[1]|this._mask[2]|this._mask[3]&&Ht(t,this._mask)}if(this._opcode>7)return this.controlMessage(t);if(this._compressed){this._state=Yt,this.decompress(t,e);return}return t.length&&(this._messageLength=this._totalPayloadLength,this._fragments.push(t)),this.dataMessage()}decompress(e,t){this._extensions[Pe.extensionName].decompress(e,this._fin,(i,n)=>{if(i)return t(i);if(n.length){if(this._messageLength+=n.length,this._messageLength>this._maxPayload&&this._maxPayload>0)return t(g(RangeError,"Max payload size exceeded",!1,1009,"WS_ERR_UNSUPPORTED_MESSAGE_LENGTH"));this._fragments.push(n)}const o=this.dataMessage();if(o)return t(o);this.startLoop(t)})}dataMessage(){if(this._fin){const e=this._messageLength,t=this._fragments;if(this._totalPayloadLength=0,this._messageLength=0,this._fragmented=0,this._fragments=[],this._opcode===2){let r;this._binaryType==="nodebuffer"?r=de(t,e):this._binaryType==="arraybuffer"?r=Vt(de(t,e)):r=t,this.emit("message",r,!0)}else{const r=de(t,e);if(!this._skipUTF8Validation&&!Ue(r))return this._loop=!1,g(Error,"invalid UTF-8 sequence",!0,1007,"WS_ERR_INVALID_UTF8");this.emit("message",r,!1)}}this._state=A}controlMessage(e){if(this._opcode===8)if(this._loop=!1,e.length===0)this.emit("conclude",1005,Re),this.end();else{const t=e.readUInt16BE(0);if(!zt(t))return g(RangeError,`invalid status code ${t}`,!0,1002,"WS_ERR_INVALID_CLOSE_CODE");const r=new X(e.buffer,e.byteOffset+2,e.length-2);if(!this._skipUTF8Validation&&!Ue(r))return g(Error,"invalid UTF-8 sequence",!0,1007,"WS_ERR_INVALID_UTF8");this.emit("conclude",t,r),this.end()}else this._opcode===9?this.emit("ping",e):this.emit("pong",e);this._state=A}};var rt=qt;function g(s,e,t,r,i){const n=new s(t?`Invalid WebSocket frame: ${e}`:e);return Error.captureStackTrace(n,g),n.code=i,n[jt]=r,n}const qs=z(rt),{randomFillSync:Kt}=S,Ie=oe,{EMPTY_BUFFER:Xt}=U,{isValidStatusCode:Zt}=ae,{mask:De,toBuffer:M}=ne,x=Symbol("kByteLength"),Qt=Buffer.alloc(4);let Jt=class P{constructor(e,t,r){this._extensions=t||{},r&&(this._generateMask=r,this._maskBuffer=Buffer.alloc(4)),this._socket=e,this._firstFragment=!0,this._compress=!1,this._bufferedBytes=0,this._deflating=!1,this._queue=[]}static frame(e,t){let r,i=!1,n=2,o=!1;t.mask&&(r=t.maskBuffer||Qt,t.generateMask?t.generateMask(r):Kt(r,0,4),o=(r[0]|r[1]|r[2]|r[3])===0,n=6);let l;typeof e=="string"?(!t.mask||o)&&t[x]!==void 
0?l=t[x]:(e=Buffer.from(e),l=e.length):(l=e.length,i=t.mask&&t.readOnly&&!o);let f=l;l>=65536?(n+=8,f=127):l>125&&(n+=2,f=126);const a=Buffer.allocUnsafe(i?l+n:n);return a[0]=t.fin?t.opcode|128:t.opcode,t.rsv1&&(a[0]|=64),a[1]=f,f===126?a.writeUInt16BE(l,2):f===127&&(a[2]=a[3]=0,a.writeUIntBE(l,4,6)),t.mask?(a[1]|=128,a[n-4]=r[0],a[n-3]=r[1],a[n-2]=r[2],a[n-1]=r[3],o?[a,e]:i?(De(e,r,a,n,l),[a]):(De(e,r,e,0,l),[a,e])):[a,e]}close(e,t,r,i){let n;if(e===void 0)n=Xt;else{if(typeof e!="number"||!Zt(e))throw new TypeError("First argument must be a valid error code number");if(t===void 0||!t.length)n=Buffer.allocUnsafe(2),n.writeUInt16BE(e,0);else{const l=Buffer.byteLength(t);if(l>123)throw new RangeError("The message must not be greater than 123 bytes");n=Buffer.allocUnsafe(2+l),n.writeUInt16BE(e,0),typeof t=="string"?n.write(t,2):n.set(t,2)}}const o={[x]:n.length,fin:!0,generateMask:this._generateMask,mask:r,maskBuffer:this._maskBuffer,opcode:8,readOnly:!1,rsv1:!1};this._deflating?this.enqueue([this.dispatch,n,!1,o,i]):this.sendFrame(P.frame(n,o),i)}ping(e,t,r){let i,n;if(typeof e=="string"?(i=Buffer.byteLength(e),n=!1):(e=M(e),i=e.length,n=M.readOnly),i>125)throw new RangeError("The data size must not be greater than 125 bytes");const o={[x]:i,fin:!0,generateMask:this._generateMask,mask:t,maskBuffer:this._maskBuffer,opcode:9,readOnly:n,rsv1:!1};this._deflating?this.enqueue([this.dispatch,e,!1,o,r]):this.sendFrame(P.frame(e,o),r)}pong(e,t,r){let i,n;if(typeof e=="string"?(i=Buffer.byteLength(e),n=!1):(e=M(e),i=e.length,n=M.readOnly),i>125)throw new RangeError("The data size must not be greater than 125 bytes");const o={[x]:i,fin:!0,generateMask:this._generateMask,mask:t,maskBuffer:this._maskBuffer,opcode:10,readOnly:n,rsv1:!1};this._deflating?this.enqueue([this.dispatch,e,!1,o,r]):this.sendFrame(P.frame(e,o),r)}send(e,t,r){const i=this._extensions[Ie.extensionName];let n=t.binary?2:1,o=t.compress,l,f;if(typeof e=="string"?(l=Buffer.byteLength(e),f=!1):(e=M(e),l=e.length,f=M.readOnly),this._firstFragment?(this._firstFragment=!1,o&&i&&i.params[i._isServer?"server_no_context_takeover":"client_no_context_takeover"]&&(o=l>=i._threshold),this._compress=o):(o=!1,n=0),t.fin&&(this._firstFragment=!0),i){const a={[x]:l,fin:t.fin,generateMask:this._generateMask,mask:t.mask,maskBuffer:this._maskBuffer,opcode:n,readOnly:f,rsv1:o};this._deflating?this.enqueue([this.dispatch,e,this._compress,a,r]):this.dispatch(e,this._compress,a,r)}else this.sendFrame(P.frame(e,{[x]:l,fin:t.fin,generateMask:this._generateMask,mask:t.mask,maskBuffer:this._maskBuffer,opcode:n,readOnly:f,rsv1:!1}),r)}dispatch(e,t,r,i){if(!t){this.sendFrame(P.frame(e,r),i);return}const n=this._extensions[Ie.extensionName];this._bufferedBytes+=r[x],this._deflating=!0,n.compress(e,r.fin,(o,l)=>{if(this._socket.destroyed){const f=new Error("The socket was closed while data was being compressed");typeof i=="function"&&i(f);for(let a=0;a<this._queue.length;a++){const c=this._queue[a],h=c[c.length-1];typeof h=="function"&&h(f)}return}this._bufferedBytes-=r[x],this._deflating=!1,r.readOnly=!1,this.sendFrame(P.frame(l,r),i),this.dequeue()})}dequeue(){for(;!this._deflating&&this._queue.length;){const e=this._queue.shift();this._bufferedBytes-=e[3][x],Reflect.apply(e[0],this,e.slice(1))}}enqueue(e){this._bufferedBytes+=e[3][x],this._queue.push(e)}sendFrame(e,t){e.length===2?(this._socket.cork(),this._socket.write(e[0]),this._socket.write(e[1],t),this._socket.uncork()):this._socket.write(e[0],t)}};var it=Jt;const 
Ks=z(it),{kForOnEventAttribute:F,kListener:pe}=U,We=Symbol("kCode"),Ae=Symbol("kData"),Fe=Symbol("kError"),je=Symbol("kMessage"),Ge=Symbol("kReason"),I=Symbol("kTarget"),Ve=Symbol("kType"),He=Symbol("kWasClean");class B{constructor(e){this[I]=null,this[Ve]=e}get target(){return this[I]}get type(){return this[Ve]}}Object.defineProperty(B.prototype,"target",{enumerable:!0});Object.defineProperty(B.prototype,"type",{enumerable:!0});class Y extends B{constructor(e,t={}){super(e),this[We]=t.code===void 0?0:t.code,this[Ge]=t.reason===void 0?"":t.reason,this[He]=t.wasClean===void 0?!1:t.wasClean}get code(){return this[We]}get reason(){return this[Ge]}get wasClean(){return this[He]}}Object.defineProperty(Y.prototype,"code",{enumerable:!0});Object.defineProperty(Y.prototype,"reason",{enumerable:!0});Object.defineProperty(Y.prototype,"wasClean",{enumerable:!0});class le extends B{constructor(e,t={}){super(e),this[Fe]=t.error===void 0?null:t.error,this[je]=t.message===void 0?"":t.message}get error(){return this[Fe]}get message(){return this[je]}}Object.defineProperty(le.prototype,"error",{enumerable:!0});Object.defineProperty(le.prototype,"message",{enumerable:!0});class xe extends B{constructor(e,t={}){super(e),this[Ae]=t.data===void 0?null:t.data}get data(){return this[Ae]}}Object.defineProperty(xe.prototype,"data",{enumerable:!0});const es={addEventListener(s,e,t={}){for(const i of this.listeners(s))if(!t[F]&&i[pe]===e&&!i[F])return;let r;if(s==="message")r=function(n,o){const l=new xe("message",{data:o?n:n.toString()});l[I]=this,Z(e,this,l)};else if(s==="close")r=function(n,o){const l=new Y("close",{code:n,reason:o.toString(),wasClean:this._closeFrameReceived&&this._closeFrameSent});l[I]=this,Z(e,this,l)};else if(s==="error")r=function(n){const o=new le("error",{error:n,message:n.message});o[I]=this,Z(e,this,o)};else if(s==="open")r=function(){const n=new B("open");n[I]=this,Z(e,this,n)};else return;r[F]=!!t[F],r[pe]=e,t.once?this.once(s,r):this.on(s,r)},removeEventListener(s,e){for(const t of this.listeners(s))if(t[pe]===e&&!t[F]){this.removeListener(s,t);break}}};var ts={CloseEvent:Y,ErrorEvent:le,Event:B,EventTarget:es,MessageEvent:xe};function Z(s,e,t){typeof s=="object"&&s.handleEvent?s.handleEvent.call(s,t):s.call(e,t)}const{tokenChars:j}=ae;function k(s,e,t){s[e]===void 0?s[e]=[t]:s[e].push(t)}function ss(s){const e=Object.create(null);let t=Object.create(null),r=!1,i=!1,n=!1,o,l,f=-1,a=-1,c=-1,h=0;for(;h<s.length;h++)if(a=s.charCodeAt(h),o===void 0)if(c===-1&&j[a]===1)f===-1&&(f=h);else if(h!==0&&(a===32||a===9))c===-1&&f!==-1&&(c=h);else if(a===59||a===44){if(f===-1)throw new SyntaxError(`Unexpected character at index ${h}`);c===-1&&(c=h);const v=s.slice(f,c);a===44?(k(e,v,t),t=Object.create(null)):o=v,f=c=-1}else throw new SyntaxError(`Unexpected character at index ${h}`);else if(l===void 0)if(c===-1&&j[a]===1)f===-1&&(f=h);else if(a===32||a===9)c===-1&&f!==-1&&(c=h);else if(a===59||a===44){if(f===-1)throw new SyntaxError(`Unexpected character at index ${h}`);c===-1&&(c=h),k(t,s.slice(f,c),!0),a===44&&(k(e,o,t),t=Object.create(null),o=void 0),f=c=-1}else if(a===61&&f!==-1&&c===-1)l=s.slice(f,h),f=c=-1;else throw new SyntaxError(`Unexpected character at index ${h}`);else if(i){if(j[a]!==1)throw new SyntaxError(`Unexpected character at index ${h}`);f===-1?f=h:r||(r=!0),i=!1}else if(n)if(j[a]===1)f===-1&&(f=h);else if(a===34&&f!==-1)n=!1,c=h;else if(a===92)i=!0;else throw new SyntaxError(`Unexpected character at index ${h}`);else if(a===34&&s.charCodeAt(h-1)===61)n=!0;else 
if(c===-1&&j[a]===1)f===-1&&(f=h);else if(f!==-1&&(a===32||a===9))c===-1&&(c=h);else if(a===59||a===44){if(f===-1)throw new SyntaxError(`Unexpected character at index ${h}`);c===-1&&(c=h);let v=s.slice(f,c);r&&(v=v.replace(/\\/g,""),r=!1),k(t,l,v),a===44&&(k(e,o,t),t=Object.create(null),o=void 0),l=void 0,f=c=-1}else throw new SyntaxError(`Unexpected character at index ${h}`);if(f===-1||n||a===32||a===9)throw new SyntaxError("Unexpected end of input");c===-1&&(c=h);const p=s.slice(f,c);return o===void 0?k(e,p,t):(l===void 0?k(t,p,!0):r?k(t,l,p.replace(/\\/g,"")):k(t,l,p),k(e,o,t)),e}function rs(s){return Object.keys(s).map(e=>{let t=s[e];return Array.isArray(t)||(t=[t]),t.map(r=>[e].concat(Object.keys(r).map(i=>{let n=r[i];return Array.isArray(n)||(n=[n]),n.map(o=>o===!0?i:`${i}=${o}`).join("; ")})).join("; ")).join(", ")}).join(", ")}var nt={format:rs,parse:ss};const is=S,ns=S,os=S,ot=S,as=S,{randomBytes:ls,createHash:fs}=S,{URL:me}=S,T=oe,hs=rt,cs=it,{BINARY_TYPES:ze,EMPTY_BUFFER:Q,GUID:us,kForOnEventAttribute:ge,kListener:ds,kStatusCode:_s,kWebSocket:y,NOOP:at}=U,{EventTarget:{addEventListener:ps,removeEventListener:ms}}=ts,{format:gs,parse:ys}=nt,{toBuffer:vs}=ne,Ss=30*1e3,lt=Symbol("kAborted"),ye=[8,13],O=["CONNECTING","OPEN","CLOSING","CLOSED"],Es=/^[!#$%&'*+\-.0-9A-Z^_`|a-z~]+$/;let m=class d extends is{constructor(e,t,r){super(),this._binaryType=ze[0],this._closeCode=1006,this._closeFrameReceived=!1,this._closeFrameSent=!1,this._closeMessage=Q,this._closeTimer=null,this._extensions={},this._paused=!1,this._protocol="",this._readyState=d.CONNECTING,this._receiver=null,this._sender=null,this._socket=null,e!==null?(this._bufferedAmount=0,this._isServer=!1,this._redirects=0,t===void 0?t=[]:Array.isArray(t)||(typeof t=="object"&&t!==null?(r=t,t=[]):t=[t]),ht(this,e,t,r)):this._isServer=!0}get binaryType(){return this._binaryType}set binaryType(e){ze.includes(e)&&(this._binaryType=e,this._receiver&&(this._receiver._binaryType=e))}get bufferedAmount(){return this._socket?this._socket._writableState.length+this._sender._bufferedBytes:this._bufferedAmount}get extensions(){return Object.keys(this._extensions).join()}get isPaused(){return this._paused}get onclose(){return null}get onerror(){return null}get onopen(){return null}get onmessage(){return null}get protocol(){return this._protocol}get readyState(){return this._readyState}get url(){return this._url}setSocket(e,t,r){const i=new hs({binaryType:this.binaryType,extensions:this._extensions,isServer:this._isServer,maxPayload:r.maxPayload,skipUTF8Validation:r.skipUTF8Validation});this._sender=new cs(e,this._extensions,r.generateMask),this._receiver=i,this._socket=e,i[y]=this,e[y]=this,i.on("conclude",ks),i.on("drain",ws),i.on("error",Os),i.on("message",Cs),i.on("ping",Ts),i.on("pong",Ls),e.setTimeout(0),e.setNoDelay(),t.length>0&&e.unshift(t),e.on("close",ut),e.on("data",fe),e.on("end",dt),e.on("error",_t),this._readyState=d.OPEN,this.emit("open")}emitClose(){if(!this._socket){this._readyState=d.CLOSED,this.emit("close",this._closeCode,this._closeMessage);return}this._extensions[T.extensionName]&&this._extensions[T.extensionName].cleanup(),this._receiver.removeAllListeners(),this._readyState=d.CLOSED,this.emit("close",this._closeCode,this._closeMessage)}close(e,t){if(this.readyState!==d.CLOSED){if(this.readyState===d.CONNECTING){const r="WebSocket was closed before the connection was 
established";b(this,this._req,r);return}if(this.readyState===d.CLOSING){this._closeFrameSent&&(this._closeFrameReceived||this._receiver._writableState.errorEmitted)&&this._socket.end();return}this._readyState=d.CLOSING,this._sender.close(e,t,!this._isServer,r=>{r||(this._closeFrameSent=!0,(this._closeFrameReceived||this._receiver._writableState.errorEmitted)&&this._socket.end())}),this._closeTimer=setTimeout(this._socket.destroy.bind(this._socket),Ss)}}pause(){this.readyState===d.CONNECTING||this.readyState===d.CLOSED||(this._paused=!0,this._socket.pause())}ping(e,t,r){if(this.readyState===d.CONNECTING)throw new Error("WebSocket is not open: readyState 0 (CONNECTING)");if(typeof e=="function"?(r=e,e=t=void 0):typeof t=="function"&&(r=t,t=void 0),typeof e=="number"&&(e=e.toString()),this.readyState!==d.OPEN){ve(this,e,r);return}t===void 0&&(t=!this._isServer),this._sender.ping(e||Q,t,r)}pong(e,t,r){if(this.readyState===d.CONNECTING)throw new Error("WebSocket is not open: readyState 0 (CONNECTING)");if(typeof e=="function"?(r=e,e=t=void 0):typeof t=="function"&&(r=t,t=void 0),typeof e=="number"&&(e=e.toString()),this.readyState!==d.OPEN){ve(this,e,r);return}t===void 0&&(t=!this._isServer),this._sender.pong(e||Q,t,r)}resume(){this.readyState===d.CONNECTING||this.readyState===d.CLOSED||(this._paused=!1,this._receiver._writableState.needDrain||this._socket.resume())}send(e,t,r){if(this.readyState===d.CONNECTING)throw new Error("WebSocket is not open: readyState 0 (CONNECTING)");if(typeof t=="function"&&(r=t,t={}),typeof e=="number"&&(e=e.toString()),this.readyState!==d.OPEN){ve(this,e,r);return}const i={binary:typeof e!="string",mask:!this._isServer,compress:!0,fin:!0,...t};this._extensions[T.extensionName]||(i.compress=!1),this._sender.send(e||Q,i,r)}terminate(){if(this.readyState!==d.CLOSED){if(this.readyState===d.CONNECTING){const e="WebSocket was closed before the connection was established";b(this,this._req,e);return}this._socket&&(this._readyState=d.CLOSING,this._socket.destroy())}}};Object.defineProperty(m,"CONNECTING",{enumerable:!0,value:O.indexOf("CONNECTING")});Object.defineProperty(m.prototype,"CONNECTING",{enumerable:!0,value:O.indexOf("CONNECTING")});Object.defineProperty(m,"OPEN",{enumerable:!0,value:O.indexOf("OPEN")});Object.defineProperty(m.prototype,"OPEN",{enumerable:!0,value:O.indexOf("OPEN")});Object.defineProperty(m,"CLOSING",{enumerable:!0,value:O.indexOf("CLOSING")});Object.defineProperty(m.prototype,"CLOSING",{enumerable:!0,value:O.indexOf("CLOSING")});Object.defineProperty(m,"CLOSED",{enumerable:!0,value:O.indexOf("CLOSED")});Object.defineProperty(m.prototype,"CLOSED",{enumerable:!0,value:O.indexOf("CLOSED")});["binaryType","bufferedAmount","extensions","isPaused","protocol","readyState","url"].forEach(s=>{Object.defineProperty(m.prototype,s,{enumerable:!0})});["open","error","close","message"].forEach(s=>{Object.defineProperty(m.prototype,`on${s}`,{enumerable:!0,get(){for(const e of this.listeners(s))if(e[ge])return e[ds];return null},set(e){for(const t of this.listeners(s))if(t[ge]){this.removeListener(s,t);break}typeof e=="function"&&this.addEventListener(s,e,{[ge]:!0})}})});m.prototype.addEventListener=ps;m.prototype.removeEventListener=ms;var ft=m;function ht(s,e,t,r){const i={protocolVersion:ye[1],maxPayload:104857600,skipUTF8Validation:!1,perMessageDeflate:!0,followRedirects:!1,maxRedirects:10,...r,createConnection:void 0,socketPath:void 0,hostname:void 0,protocol:void 0,timeout:void 0,method:"GET",host:void 0,path:void 0,port:void 
0};if(!ye.includes(i.protocolVersion))throw new RangeError(`Unsupported protocol version: ${i.protocolVersion} (supported versions: ${ye.join(", ")})`);let n;if(e instanceof me)n=e,s._url=e.href;else{try{n=new me(e)}catch{throw new SyntaxError(`Invalid URL: ${e}`)}s._url=e}const o=n.protocol==="wss:",l=n.protocol==="ws+unix:";let f;if(n.protocol!=="ws:"&&!o&&!l?f=`The URL's protocol must be one of "ws:", "wss:", or "ws+unix:"`:l&&!n.pathname?f="The URL's pathname is empty":n.hash&&(f="The URL contains a fragment identifier"),f){const u=new SyntaxError(f);if(s._redirects===0)throw u;ee(s,u);return}const a=o?443:80,c=ls(16).toString("base64"),h=o?ns.request:os.request,p=new Set;let v;if(i.createConnection=o?xs:bs,i.defaultPort=i.defaultPort||a,i.port=n.port||a,i.host=n.hostname.startsWith("[")?n.hostname.slice(1,-1):n.hostname,i.headers={...i.headers,"Sec-WebSocket-Version":i.protocolVersion,"Sec-WebSocket-Key":c,Connection:"Upgrade",Upgrade:"websocket"},i.path=n.pathname+n.search,i.timeout=i.handshakeTimeout,i.perMessageDeflate&&(v=new T(i.perMessageDeflate!==!0?i.perMessageDeflate:{},!1,i.maxPayload),i.headers["Sec-WebSocket-Extensions"]=gs({[T.extensionName]:v.offer()})),t.length){for(const u of t){if(typeof u!="string"||!Es.test(u)||p.has(u))throw new SyntaxError("An invalid or duplicated subprotocol was specified");p.add(u)}i.headers["Sec-WebSocket-Protocol"]=t.join(",")}if(i.origin&&(i.protocolVersion<13?i.headers["Sec-WebSocket-Origin"]=i.origin:i.headers.Origin=i.origin),(n.username||n.password)&&(i.auth=`${n.username}:${n.password}`),l){const u=i.path.split(":");i.socketPath=u[0],i.path=u[1]}let _;if(i.followRedirects){if(s._redirects===0){s._originalIpc=l,s._originalSecure=o,s._originalHostOrSocketPath=l?i.socketPath:n.host;const u=r&&r.headers;if(r={...r,headers:{}},u)for(const[E,$]of Object.entries(u))r.headers[E.toLowerCase()]=$}else if(s.listenerCount("redirect")===0){const u=l?s._originalIpc?i.socketPath===s._originalHostOrSocketPath:!1:s._originalIpc?!1:n.host===s._originalHostOrSocketPath;(!u||s._originalSecure&&!o)&&(delete i.headers.authorization,delete i.headers.cookie,u||delete i.headers.host,i.auth=void 0)}i.auth&&!r.headers.authorization&&(r.headers.authorization="Basic "+Buffer.from(i.auth).toString("base64")),_=s._req=h(i),s._redirects&&s.emit("redirect",s.url,_)}else _=s._req=h(i);i.timeout&&_.on("timeout",()=>{b(s,_,"Opening handshake has timed out")}),_.on("error",u=>{_===null||_[lt]||(_=s._req=null,ee(s,u))}),_.on("response",u=>{const E=u.headers.location,$=u.statusCode;if(E&&i.followRedirects&&$>=300&&$<400){if(++s._redirects>i.maxRedirects){b(s,_,"Maximum redirects exceeded");return}_.abort();let q;try{q=new me(E,e)}catch{const L=new SyntaxError(`Invalid URL: ${E}`);ee(s,L);return}ht(s,q,t,r)}else s.emit("unexpected-response",_,u)||b(s,_,`Unexpected server response: ${u.statusCode}`)}),_.on("upgrade",(u,E,$)=>{if(s.emit("upgrade",u),s.readyState!==m.CONNECTING)return;if(_=s._req=null,u.headers.upgrade.toLowerCase()!=="websocket"){b(s,E,"Invalid Upgrade header");return}const q=fs("sha1").update(c+us).digest("base64");if(u.headers["sec-websocket-accept"]!==q){b(s,E,"Invalid Sec-WebSocket-Accept header");return}const D=u.headers["sec-websocket-protocol"];let L;if(D!==void 0?p.size?p.has(D)||(L="Server sent an invalid subprotocol"):L="Server sent a subprotocol but none was requested":p.size&&(L="Server sent no subprotocol"),L){b(s,E,L);return}D&&(s._protocol=D);const ke=u.headers["sec-websocket-extensions"];if(ke!==void 0){if(!v){b(s,E,"Server sent a 
Sec-WebSocket-Extensions header but no extension was requested");return}let he;try{he=ys(ke)}catch{b(s,E,"Invalid Sec-WebSocket-Extensions header");return}const we=Object.keys(he);if(we.length!==1||we[0]!==T.extensionName){b(s,E,"Server indicated an extension that was not requested");return}try{v.accept(he[T.extensionName])}catch{b(s,E,"Invalid Sec-WebSocket-Extensions header");return}s._extensions[T.extensionName]=v}s.setSocket(E,$,{generateMask:i.generateMask,maxPayload:i.maxPayload,skipUTF8Validation:i.skipUTF8Validation})}),i.finishRequest?i.finishRequest(_,s):_.end()}function ee(s,e){s._readyState=m.CLOSING,s.emit("error",e),s.emitClose()}function bs(s){return s.path=s.socketPath,ot.connect(s)}function xs(s){return s.path=void 0,!s.servername&&s.servername!==""&&(s.servername=ot.isIP(s.host)?"":s.host),as.connect(s)}function b(s,e,t){s._readyState=m.CLOSING;const r=new Error(t);Error.captureStackTrace(r,b),e.setHeader?(e[lt]=!0,e.abort(),e.socket&&!e.socket.destroyed&&e.socket.destroy(),process.nextTick(ee,s,r)):(e.destroy(r),e.once("error",s.emit.bind(s,"error")),e.once("close",s.emitClose.bind(s)))}function ve(s,e,t){if(e){const r=vs(e).length;s._socket?s._sender._bufferedBytes+=r:s._bufferedAmount+=r}if(t){const r=new Error(`WebSocket is not open: readyState ${s.readyState} (${O[s.readyState]})`);process.nextTick(t,r)}}function ks(s,e){const t=this[y];t._closeFrameReceived=!0,t._closeMessage=e,t._closeCode=s,t._socket[y]!==void 0&&(t._socket.removeListener("data",fe),process.nextTick(ct,t._socket),s===1005?t.close():t.close(s,e))}function ws(){const s=this[y];s.isPaused||s._socket.resume()}function Os(s){const e=this[y];e._socket[y]!==void 0&&(e._socket.removeListener("data",fe),process.nextTick(ct,e._socket),e.close(s[_s])),e.emit("error",s)}function Ye(){this[y].emitClose()}function Cs(s,e){this[y].emit("message",s,e)}function Ts(s){const e=this[y];e.pong(s,!e._isServer,at),e.emit("ping",s)}function Ls(s){this[y].emit("pong",s)}function ct(s){s.resume()}function ut(){const s=this[y];this.removeListener("close",ut),this.removeListener("data",fe),this.removeListener("end",dt),s._readyState=m.CLOSING;let e;!this._readableState.endEmitted&&!s._closeFrameReceived&&!s._receiver._writableState.errorEmitted&&(e=s._socket.read())!==null&&s._receiver.write(e),s._receiver.end(),this[y]=void 0,clearTimeout(s._closeTimer),s._receiver._writableState.finished||s._receiver._writableState.errorEmitted?s.emitClose():(s._receiver.on("error",Ye),s._receiver.on("finish",Ye))}function fe(s){this[y]._receiver.write(s)||this.pause()}function dt(){const s=this[y];s._readyState=m.CLOSING,s._receiver.end(),this.end()}function _t(){const s=this[y];this.removeListener("error",_t),this.on("error",at),s&&(s._readyState=m.CLOSING,this.destroy())}const Xs=z(ft),{tokenChars:Ns}=ae;function Ps(s){const e=new Set;let t=-1,r=-1,i=0;for(i;i<s.length;i++){const o=s.charCodeAt(i);if(r===-1&&Ns[o]===1)t===-1&&(t=i);else if(i!==0&&(o===32||o===9))r===-1&&t!==-1&&(r=i);else if(o===44){if(t===-1)throw new SyntaxError(`Unexpected character at index ${i}`);r===-1&&(r=i);const l=s.slice(t,r);if(e.has(l))throw new SyntaxError(`The "${l}" subprotocol is duplicated`);e.add(l),t=r=-1}else throw new SyntaxError(`Unexpected character at index ${i}`)}if(t===-1||r!==-1)throw new SyntaxError("Unexpected end of input");const n=s.slice(t,i);if(e.has(n))throw new SyntaxError(`The "${n}" subprotocol is duplicated`);return e.add(n),e}var Rs={parse:Ps};const 
Us=S,ie=S,{createHash:Bs}=S,qe=nt,N=oe,$s=Rs,Ms=ft,{GUID:Is,kWebSocket:Ds}=U,Ws=/^[+/0-9A-Za-z]{22}==$/,Ke=0,Xe=1,pt=2;class As extends Us{constructor(e,t){if(super(),e={maxPayload:100*1024*1024,skipUTF8Validation:!1,perMessageDeflate:!1,handleProtocols:null,clientTracking:!0,verifyClient:null,noServer:!1,backlog:null,server:null,host:null,path:null,port:null,WebSocket:Ms,...e},e.port==null&&!e.server&&!e.noServer||e.port!=null&&(e.server||e.noServer)||e.server&&e.noServer)throw new TypeError('One and only one of the "port", "server", or "noServer" options must be specified');if(e.port!=null?(this._server=ie.createServer((r,i)=>{const n=ie.STATUS_CODES[426];i.writeHead(426,{"Content-Length":n.length,"Content-Type":"text/plain"}),i.end(n)}),this._server.listen(e.port,e.host,e.backlog,t)):e.server&&(this._server=e.server),this._server){const r=this.emit.bind(this,"connection");this._removeListeners=js(this._server,{listening:this.emit.bind(this,"listening"),error:this.emit.bind(this,"error"),upgrade:(i,n,o)=>{this.handleUpgrade(i,n,o,r)}})}e.perMessageDeflate===!0&&(e.perMessageDeflate={}),e.clientTracking&&(this.clients=new Set,this._shouldEmitClose=!1),this.options=e,this._state=Ke}address(){if(this.options.noServer)throw new Error('The server is operating in "noServer" mode');return this._server?this._server.address():null}close(e){if(this._state===pt){e&&this.once("close",()=>{e(new Error("The server is not running"))}),process.nextTick(G,this);return}if(e&&this.once("close",e),this._state!==Xe)if(this._state=Xe,this.options.noServer||this.options.server)this._server&&(this._removeListeners(),this._removeListeners=this._server=null),this.clients?this.clients.size?this._shouldEmitClose=!0:process.nextTick(G,this):process.nextTick(G,this);else{const t=this._server;this._removeListeners(),this._removeListeners=this._server=null,t.close(()=>{G(this)})}}shouldHandle(e){if(this.options.path){const t=e.url.indexOf("?");if((t!==-1?e.url.slice(0,t):e.url)!==this.options.path)return!1}return!0}handleUpgrade(e,t,r,i){t.on("error",Ze);const n=e.headers["sec-websocket-key"],o=+e.headers["sec-websocket-version"];if(e.method!=="GET"){R(this,e,t,405,"Invalid HTTP method");return}if(e.headers.upgrade.toLowerCase()!=="websocket"){R(this,e,t,400,"Invalid Upgrade header");return}if(!n||!Ws.test(n)){R(this,e,t,400,"Missing or invalid Sec-WebSocket-Key header");return}if(o!==8&&o!==13){R(this,e,t,400,"Missing or invalid Sec-WebSocket-Version header");return}if(!this.shouldHandle(e)){H(t,400);return}const l=e.headers["sec-websocket-protocol"];let f=new Set;if(l!==void 0)try{f=$s.parse(l)}catch{R(this,e,t,400,"Invalid Sec-WebSocket-Protocol header");return}const a=e.headers["sec-websocket-extensions"],c={};if(this.options.perMessageDeflate&&a!==void 0){const h=new N(this.options.perMessageDeflate,!0,this.options.maxPayload);try{const p=qe.parse(a);p[N.extensionName]&&(h.accept(p[N.extensionName]),c[N.extensionName]=h)}catch{R(this,e,t,400,"Invalid or unacceptable Sec-WebSocket-Extensions header");return}}if(this.options.verifyClient){const h={origin:e.headers[`${o===8?"sec-websocket-origin":"origin"}`],secure:!!(e.socket.authorized||e.socket.encrypted),req:e};if(this.options.verifyClient.length===2){this.options.verifyClient(h,(p,v,_,u)=>{if(!p)return H(t,v||401,_,u);this.completeUpgrade(c,n,f,e,t,r,i)});return}if(!this.options.verifyClient(h))return H(t,401)}this.completeUpgrade(c,n,f,e,t,r,i)}completeUpgrade(e,t,r,i,n,o,l){if(!n.readable||!n.writable)return n.destroy();if(n[Ds])throw new 
Error("server.handleUpgrade() was called more than once with the same socket, possibly due to a misconfiguration");if(this._state>Ke)return H(n,503);const a=["HTTP/1.1 101 Switching Protocols","Upgrade: websocket","Connection: Upgrade",`Sec-WebSocket-Accept: ${Bs("sha1").update(t+Is).digest("base64")}`],c=new this.options.WebSocket(null);if(r.size){const h=this.options.handleProtocols?this.options.handleProtocols(r,i):r.values().next().value;h&&(a.push(`Sec-WebSocket-Protocol: ${h}`),c._protocol=h)}if(e[N.extensionName]){const h=e[N.extensionName].params,p=qe.format({[N.extensionName]:[h]});a.push(`Sec-WebSocket-Extensions: ${p}`),c._extensions=e}this.emit("headers",a,i),n.write(a.concat(`\r
2
- `).join(`\r
3
- `)),n.removeListener("error",Ze),c.setSocket(n,o,{maxPayload:this.options.maxPayload,skipUTF8Validation:this.options.skipUTF8Validation}),this.clients&&(this.clients.add(c),c.on("close",()=>{this.clients.delete(c),this._shouldEmitClose&&!this.clients.size&&process.nextTick(G,this)})),l(c,i)}}var Fs=As;function js(s,e){for(const t of Object.keys(e))s.on(t,e[t]);return function(){for(const r of Object.keys(e))s.removeListener(r,e[r])}}function G(s){s._state=pt,s.emit("close")}function Ze(){this.destroy()}function H(s,e,t,r){t=t||ie.STATUS_CODES[e],r={Connection:"close","Content-Type":"text/html","Content-Length":Buffer.byteLength(t),...r},s.once("finish",s.destroy),s.end(`HTTP/1.1 ${e} ${ie.STATUS_CODES[e]}\r
4
- `+Object.keys(r).map(i=>`${i}: ${r[i]}`).join(`\r
5
- `)+`\r
6
- \r
7
- `+t)}function R(s,e,t,r,i){if(s.listenerCount("wsClientError")){const n=new Error(i);Error.captureStackTrace(n,R),s.emit("wsClientError",n,t,e)}else H(t,r,i)}const Zs=z(Fs);export{qs as Receiver,Ks as Sender,Xs as WebSocket,Zs as WebSocketServer,Vs as createWebSocketStream,Xs as default};
8
- //# sourceMappingURL=wrapper-6f348d45-38be7a64.js.map
spaces/DarrenK196/catvsdog/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Catvsdog
3
- emoji: 😻
4
- colorFrom: pink
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.4.1
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference