parquet-converter commited on
Commit
753f4ff
·
1 Parent(s): ac0524b

Update parquet files (step 37 of 397)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/AutoCAD for Mac M1 Free Download How to Avoid Viruses and Legal Issues.md +0 -44
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Codigo De Activacion Robot Structural Analysis Professional 2013.md +0 -35
  3. spaces/1gistliPinn/ChatGPT4/Examples/Active Sky Next Fsx Crack Sp2 18 [HOT].md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/Bcm92045nmd Driver Download [REPACK].md +0 -168
  5. spaces/1gistliPinn/ChatGPT4/Examples/English Vinglish Full Movie Download WorldHigh Quality Free4u 23.md +0 -6
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us The Most Popular Game of 2023 Now Available for Android 5.1.1.md +0 -150
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/DRIFT SPIRITS MOD APK Everything You Need to Know.md +0 -116
  8. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Descubre el nuevo modo de juego de Stickman duelista supremo y reta a tus amigos en lnea.md +0 -197
  9. spaces/1phancelerku/anime-remove-background/Cleaner How to Remove Unwanted Files and Boost Your System Performance.md +0 -147
  10. spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/layers_123821KB.py +0 -118
  11. spaces/AIConsultant/MusicGen/audiocraft/utils/samples/__init__.py +0 -5
  12. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/metrics.py +0 -69
  13. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/vggishish/logger.py +0 -87
  14. spaces/AIZero2HeroBootcamp/StaticHTML5Playcanvas/README.md +0 -10
  15. spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/midas/midas_net_custom.py +0 -128
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/checkbox/Checkbox.d.ts +0 -2
  17. spaces/AkitoP/umamusume_bert_vits2/utils.py +0 -356
  18. spaces/Amrrs/DragGan-Inversion/stylegan_human/__init__.py +0 -0
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/CODE_OF_CONDUCT.md +0 -130
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/mulit_token_textual_inversion/textual_inversion.py +0 -927
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_lms_discrete_flax.py +0 -283
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/repaint/__init__.py +0 -0
  23. spaces/Andy1621/uniformer_image_detection/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py +0 -16
  24. spaces/Andy1621/uniformer_image_segmentation/configs/emanet/emanet_r50-d8_769x769_80k_cityscapes.py +0 -9
  25. spaces/Andyrasika/Andyrasika-lora_diffusion/app.py +0 -3
  26. spaces/Arthur678/vits-uma-genshin-honkai/commons.py +0 -172
  27. spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/__init__.py +0 -18
  28. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/connectionpool.py +0 -1110
  29. spaces/AzumaSeren100/XuanShen-Bert-VITS2/modules.py +0 -452
  30. spaces/BasToTheMax/TTS/README.md +0 -10
  31. spaces/Benson/text-generation/Examples/Car Park.md +0 -148
  32. spaces/Benson/text-generation/Examples/Descargar Destino Final Mod Apk.md +0 -58
  33. spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/misc/coord.py +0 -31
  34. spaces/BetterAPI/BetterChat/src/styles/main.css +0 -17
  35. spaces/CVPR/LIVE/thrust/thrust/detail/complex/cpow.h +0 -55
  36. spaces/CVPR/LIVE/thrust/thrust/detail/dependencies_aware_execution_policy.h +0 -105
  37. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/unique.h +0 -23
  38. spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/uninitialized_copy.h +0 -44
  39. spaces/CVPR/regionclip-demo/detectron2/evaluation/pascal_voc_evaluation.py +0 -300
  40. spaces/Carterclear/swarm-agents/README.md +0 -14
  41. spaces/Celestinian/Nora-Inference/README.md +0 -13
  42. spaces/Chukwuka/Dog_Breed_ImageWoof/utils.py +0 -120
  43. spaces/Clementapa/orang-outan-image-video-detection/app.py +0 -230
  44. spaces/CofAI/chat.b4/client/css/hljs.css +0 -68
  45. spaces/CofAI/chat/g4f/Provider/Providers/Bard.py +0 -74
  46. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/event.py +0 -12
  47. spaces/DDD2222/webui/README.md +0 -20
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/checkbox.py +0 -134
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/_tensorboard_logger.py +0 -157
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/commands/lfs.py +0 -202
spaces/1acneusushi/gradio-2dmoleculeeditor/data/AutoCAD for Mac M1 Free Download How to Avoid Viruses and Legal Issues.md DELETED
@@ -1,44 +0,0 @@
1
-
2
- <h1>How to Get AutoCAD for Mac M1 for Free</h1>
3
-
4
- <p>AutoCAD is one of the most popular and powerful CAD software in the world, used by architects, engineers, designers, and many other professionals. But what if you have a Mac M1 computer and you want to use AutoCAD without paying a subscription fee? Is there a way to get AutoCAD for Mac M1 for free? In this article, we will show you how to do that and what are the pros and cons of using AutoCAD for Mac M1 for free.</p>
5
-
6
- <h2>What is AutoCAD for Mac M1?</h2>
7
-
8
- <p>AutoCAD for Mac M1 is the latest version of AutoCAD software that is compatible with the Apple M1 chip, which powers the new Mac computers such as the MacBook Air, MacBook Pro, Mac mini, and iMac. The Apple M1 chip is a powerful processor that offers faster performance, longer battery life, and better graphics than the previous Intel-based Macs. AutoCAD for Mac M1 delivers the same functionality and features as the Windows version of AutoCAD, but with a native Mac interface and optimized performance for the M1 chip.</p>
9
- <h2>autocad mac m1 free</h2><br /><p><b><b>Download Zip</b> - <a href="https://byltly.com/2uKwEW">https://byltly.com/2uKwEW</a></b></p><br /><br />
10
-
11
- <h2>How to Get AutoCAD for Mac M1 for Free?</h2>
12
-
13
- <p>There are several ways to get AutoCAD for Mac M1 for free, depending on your needs and preferences. Here are some of them:</p>
14
-
15
- <ul>
16
- <li><b>Educational license</b>: If you are a student or an educator, you can get a free educational license of AutoCAD for Mac M1 from Autodesk's website. You will need to create an account with your school email address and verify your eligibility. You can use the educational license for learning purposes only and not for commercial or professional projects. The educational license is valid for one year and can be renewed as long as you remain eligible.</li>
17
- <li><b>Free trial</b>: If you are not a student or an educator, you can still get a free trial of AutoCAD for Mac M1 from Autodesk's website. You will need to create an account with your email address and download the installer file. You can use the free trial for 30 days and access all the features and functions of AutoCAD for Mac M1. However, after the trial period expires, you will need to purchase a subscription or uninstall the software.</li>
18
- <li><b>Cracked version</b>: If you are looking for a way to use AutoCAD for Mac M1 without paying or verifying your eligibility, you might be tempted to look for a cracked version of the software on Reddit or other websites. A cracked version is a modified version of the software that bypasses the license verification or activation process. However, using a cracked version of AutoCAD for Mac M1 is illegal, risky, and not recommended. You may face legal consequences if you are caught using pirated software. You may also expose your computer to viruses or malware that can damage your data or system. You may also miss out on important updates, bug fixes, and security patches that Autodesk provides for its legitimate users.</li>
19
- </ul>
20
-
21
- <h2>What are the Pros and Cons of Using AutoCAD for Mac M1 for Free?</h2>
22
-
23
- <p>Using AutoCAD for Mac M1 for free has some pros and cons that you should consider before deciding whether it is worth it or not. Some of the pros are:</p>
24
-
25
- <ul>
26
- <li>You can save money by not paying a subscription fee.</li>
27
- <li>You can access the same features and functions as the paid version of AutoCAD for Mac M1.</li>
28
- <li>You can learn how to use AutoCAD for Mac M1 and improve your skills.</li>
29
- </ul>
30
-
31
- <p>Some of the cons are:</p>
32
-
33
- <ul>
34
- <li>You may violate Autodesk's terms of service and face legal actions if you use a cracked version of AutoCAD for Mac M1.</li>
35
- <li>You may compromise your computer's security and performance if you download a cracked version of AutoCAD for Mac M1 from unreliable sources.</li>
36
- <li>You may lose access to technical support, customer service, updates, and enhancements that Autodesk provides for its paid users.</li>
37
- </ul>
38
-
39
- <h2>Conclusion</h2>
40
-
41
- <p>AutoCAD for Mac M1 is a powerful CAD software that is compatible with the Apple M1 chip and offers fast performance, high-quality graphics, and native Mac interface. You can get AutoCAD for Mac M1 for free by</p>
42
- <p></p> ddb901b051<br />
43
- <br />
44
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Codigo De Activacion Robot Structural Analysis Professional 2013.md DELETED
@@ -1,35 +0,0 @@
1
- <br />
2
- <h1>¿Cómo activar Robot Structural Analysis Professional 2013?</h1>
3
- <p>Robot Structural Analysis Professional 2013 es un software de Autodesk que permite realizar análisis estructurales avanzados y simulaciones de cargas dinámicas. Para poder utilizar este programa, es necesario activar una licencia válida que se obtiene al comprar el producto o al suscribirse a un plan de Autodesk.</p>
4
- <h2>Codigo De Activacion Robot Structural Analysis Professional 2013</h2><br /><p><b><b>Download File</b> &#9193; <a href="https://byltly.com/2uKvFq">https://byltly.com/2uKvFq</a></b></p><br /><br />
5
- <p>Para activar Robot Structural Analysis Professional 2013, se debe seguir estos pasos:</p>
6
- <ol>
7
- <li>Instalar el software en el ordenador siguiendo las instrucciones del instalador.</li>
8
- <li>Ejecutar el programa y seleccionar la opción "Activar" en la pantalla de inicio.</li>
9
- <li>Ingresar el número de serie y la clave de producto que se recibieron al comprar o suscribirse al software. El número de serie tiene 12 dígitos y la clave de producto tiene 5 dígitos. Por ejemplo, el número de serie puede ser 123-45678901 y la clave de producto puede ser 547F1.</li>
10
- <li>Seleccionar el método de activación que se prefiera: por Internet, por teléfono o por correo electrónico. Si se elige la opción por Internet, se debe tener una conexión a Internet activa y seguir las instrucciones en pantalla. Si se elige la opción por teléfono o por correo electrónico, se debe contactar con el servicio de atención al cliente de Autodesk y proporcionar el código de solicitud que se genera en el programa. El código de solicitud tiene 16 dígitos y se muestra en la pantalla de activación. Por ejemplo, el código de solicitud puede ser A1B2-C3D4-E5F6-G7H8.</li>
11
- <li>Introducir el código de activación que se recibe del servicio de atención al cliente de Autodesk. El código de activación tiene 16 dígitos y se debe ingresar en el programa para completar la activación. Por ejemplo, el código de activación puede ser I9J0-K1L2-M3N4-O5P6.</li>
12
- <li>Disfrutar del software y sus funciones.</li>
13
- </ol>
14
- <p>Si se tiene algún problema o duda con la activación, se puede consultar la página web de Autodesk o contactar con el soporte técnico.</p>
15
-
16
- <p>Robot Structural Analysis Professional 2013 es un software que ofrece múltiples funciones para el diseño y análisis de estructuras de todo tipo. Algunas de las funciones más destacadas son:</p>
17
- <p></p>
18
- <ul>
19
- <li>Permite modelar estructuras complejas con elementos finitos de diferentes materiales, como acero, hormigón, madera o aluminio.</li>
20
- <li>Permite realizar análisis estáticos y dinámicos de las estructuras, considerando diferentes tipos de cargas, como peso propio, viento, sismo, nieve o temperatura.</li>
21
- <li>Permite verificar el cumplimiento de las normas de diseño y cálculo de diferentes países y regiones, como Eurocódigo, ACI, AISC o ASCE.</li>
22
- <li>Permite generar informes detallados y personalizados de los resultados del análisis, incluyendo gráficos, tablas y diagramas.</li>
23
- <li>Permite exportar e importar datos desde y hacia otros programas de Autodesk, como AutoCAD, Revit o Inventor.</li>
24
- </ul>
25
- <p>Robot Structural Analysis Professional 2013 es un software que se actualiza constantemente para ofrecer las mejores prestaciones y funcionalidades a los usuarios. Para actualizar a una versión más reciente del software, se debe seguir estos pasos:</p>
26
- <ol>
27
- <li>Acceder a la cuenta de Autodesk y verificar si se tiene una suscripción activa al software o si se puede renovar la licencia.</li>
28
- <li>Descargar la versión más reciente del software desde la página web de Autodesk o desde el gestor de aplicaciones de Autodesk.</li>
29
- <li>Instalar la nueva versión del software en el ordenador siguiendo las instrucciones del instalador.</li>
30
- <li>Activar la nueva versión del software con el mismo número de serie y clave de producto que se usaron para la versión anterior.</li>
31
- <li>Disfrutar de las nuevas funciones y mejoras del software.</li>
32
- </ol>
33
- <p>Si se tiene algún problema o duda con la actualización, se puede consultar la página web de Autodesk o contactar con el soporte técnico.</p> 81aa517590<br />
34
- <br />
35
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Active Sky Next Fsx Crack Sp2 18 [HOT].md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Active Sky Next Fsx Crack Sp2 18</h2><br /><p><b><b>Download File</b> &#9745; <a href="https://imgfil.com/2uxYle">https://imgfil.com/2uxYle</a></b></p><br /><br />
2
- <br />
3
- Active Sky is a comprehensive weather simulation engine for FSX, P3D and now X-Plane desktop flight simulator platforms. Over 20 years of development, ...expected "by the end of the year"! Unlike solutions such as Aerosoft, OASIS, etc., Active Sky not only provides modeling tools, but also provides “full integration” (full modeling) with weather data, including visual information, maps, etc. “Currently, in our opinion, Active Sky is the most powerful and reliable weather package in the world,” says Michael Schmitt, Marketing Director of Active Sky. 8a78ff9644<br />
4
- <br />
5
- <br />
6
- <p></p>
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Bcm92045nmd Driver Download [REPACK].md DELETED
@@ -1,168 +0,0 @@
1
-
2
- ---> ServiceClient failure for DeepLeo[/ERROR]</p>
3
- <p>If you want to learn more about the BCM92045NMD driver and other Bluetooth drivers, you can visit the official websites of your laptop manufacturer, Broadcom, or other reliable sources. You can also check out some online forums, blogs, or videos that offer more tips and tricks on how to use and optimize your Bluetooth module. You can also share your own experiences and feedback with other users who have the same device as you. By doing so, you can improve your knowledge and skills on Bluetooth technology, and enjoy its benefits more.</p>
4
- <h2>bcm92045nmd driver download</h2><br /><p><b><b>Download Zip</b> &middot;&middot;&middot;&middot;&middot; <a href="https://imgfil.com/2uy0oa">https://imgfil.com/2uy0oa</a></b></p><br /><br />
5
- <p>If you have any problems or issues with the BCM92045NMD driver or your Bluetooth module, you can contact the customer support or technical support of your laptop manufacturer, Broadcom, or the driver updater tool that you used. They can provide you with more assistance and guidance on how to solve your problems or issues. You can also check if there are any FAQs or troubleshooting guides available on their websites that can help you with your questions or concerns. By doing so, you can get more professional and reliable help on your Bluetooth module and driver.</p>
6
- <h2>How to Uninstall the BCM92045NMD Driver</h2>
7
- <p>If you want to uninstall the BCM92045NMD driver from your laptop, you can do so by using a device manager or a driver uninstaller tool. Here are the steps to uninstall the BCM92045NMD driver using a device manager:</p>
8
- <ol>
9
- <li>Press Windows + X keys on your keyboard and select Device Manager from the menu.</li>
10
- <li>Expand the Bluetooth category and find your BCM92045NMD (BRCM1018) device.</li>
11
- <li>Right-click on it and select Uninstall Device from the menu.</li>
12
- <li>Check the box that says Delete the driver software for this device and click on Uninstall.</li>
13
- <li>Restart your laptop when prompted.</li>
14
- </ol>
15
- <p>If you want to use a driver uninstaller tool instead of a device manager, you can download and install a reputable tool such as IObit Uninstaller, Revo Uninstaller, or Geek Uninstaller. These tools can scan your laptop for unwanted drivers and uninstall them completely with one click. Here are
16
- the steps to uninstall the BCM92045NMD driver using a driver uninstaller tool:</p>
17
- <ol>
18
- <li>Download and install a driver uninstaller tool of your choice from its official website.</li>
19
- <li>Launch the tool and click on drivers or tools section of the tool.</li>
20
- <li>Find your BCM92045NMD (BRCM1018) device and click on uninstall or remove button next to it.</li>
21
- <li>Wait for the tool to uninstall the BCM92045NMD driver from your device.</li>
22
- <li>Restart your laptop when prompted.</li>
23
- </ol>
24
-
25
- <h2>How to Backup and Restore the BCM92045NMD Driver</h2>
26
-
27
- <p>If you want to backup and restore the BCM92045NMD driver on your laptop, you can do so by using a driver backup and restore tool. This can help you in case you need to reinstall or update your driver, or if you encounter any problems or issues with your driver. Here are
28
- the steps to backup and restore the BCM92045NMD driver using a driver backup and restore tool:</p>
29
-
30
- <ol>
31
-
32
- <li>Download and install a driver backup and restore tool of your choice from its official website. Some of the popular tools are Driver Magician, DriverMax, and Driver Genius.</li>
33
-
34
- <li>Launch the tool and click on backup or export section of the tool.</li>
35
-
36
- <li>Select your BCM92045NMD (BRCM1018) device and click on backup or export button next to it.</li>
37
-
38
- <li>Choose a location and a name for your backup file and click on save or ok.</li>
39
-
40
- <li>Wait for the tool to backup the BCM92045NMD driver on your computer.</li>
41
-
42
- <li>To restore the BCM92045NMD driver, launch the tool again and click on restore or import section of the tool.</li>
43
-
44
- <li>Select your backup file and click on restore or import button next to it.</li>
45
-
46
- <li>Wait for the tool to restore the BCM92045NMD driver on your device.</li>
47
-
48
- <li>Restart your laptop when prompted.</li>
49
-
50
- </ol>
51
-
52
- <h2>How to Fix Some Common Errors with BCM92045NMD Driver</h2>
53
-
54
- <p>Sometimes, even after installing, updating, or uninstalling your BCM92045NMD driver, you may still encounter some errors with your Bluetooth module, such as code 10, code 43, code 52, or code 28 errors. These errors indicate that there is something wrong with your device or driver, and may prevent you from using your Bluetooth module properly. Here are some of
55
- the common errors with BCM92045NMD driver and how to fix them:</p>
56
-
57
- <ul>
58
-
59
- <li>If you see a code 10 error, which means that your device cannot start, you may need to update or reinstall your driver. To do so, follow the steps in the previous sections of this article on how to update or reinstall your BCM92045NMD driver.</li>
60
-
61
- <li>If you see a code 43 error, which means that Windows has stopped your device because it has reported problems, you may need to check if there is any hardware issue with your device. To do so,
62
- turn off your laptop and disconnect any external devices,
63
- such as USB drives,
64
- printers,
65
- or monitors.
66
- Then open
67
- your laptop case
68
- and locate
69
- your Bluetooth module.
70
- Make sure that it is properly connected
71
- and secured
72
- to
73
- your motherboard.
74
- If not,
75
- reconnect
76
- or replace
77
- it if needed.
78
- Then close
79
- your laptop case
80
- and turn on
81
- your laptop.
82
- Then check if
83
- your Bluetooth module works properly.
84
- If not,
85
- you may need
86
- to update or reinstall
87
- your driver as explained above.
88
- </li>
89
-
90
- <li>If you see a code 52 error, which means that Windows cannot verify the digital signature for your driver, you may need to disable driver signature enforcement. To do so,
91
- restart your laptop and press F8 key repeatedly until you enter Advanced Boot Options menu.
92
- Then select Disable Driver Signature Enforcement from
93
- the list
94
- and press Enter.
95
- Then wait for Windows to load normally.
96
- Then check if
97
- your Bluetooth module works properly.
98
- If not,
99
- you may need
100
- to update or reinstall
101
- your driver as explained above.
102
- </li>
103
-
104
- <li>If you see a code 28 error, which means that your device drivers are not installed, you may need to install them manually. To do so,
105
- follow the steps in the previous sections of this article on how to download and install your BCM92045NMD driver.</li>
106
-
107
- </ul>
108
- <p>If you want to enhance your Bluetooth experience and enjoy more features and functions with your Bluetooth module, you can also download and install some Bluetooth software or applications that are compatible with your device and driver. Some of the popular Bluetooth software or applications are Bluetooth File Transfer, Bluetooth Driver Installer, Bluetooth View, and Bluetooth Remote Control. These software or applications can help you to transfer files, install drivers, monitor devices, and control devices using your Bluetooth module. You can find these software or applications on various websites or online stores, such as Google Play, Microsoft Store, or CNET. However, make sure that you download and install them from reliable and safe sources, and that you check their reviews and ratings before using them.</p>
109
- <p>If you have any feedback or suggestions on the BCM92045NMD driver or your Bluetooth module, you can also contact the customer service or technical support of your laptop manufacturer, Broadcom, or the driver updater tool that you used. They can provide you with more information and guidance on how to improve your Bluetooth module and driver. You can also check if there are any surveys or feedback forms available on their websites that can help you to share your opinions and experiences with them. By doing so, you can help them to improve their products and services, and also get some rewards or discounts for your participation.</p>
110
- <h2>How to Connect Your Bluetooth Device to Your Laptop Using BCM92045NMD Driver</h2>
111
- <p>Once you have installed or updated your BCM92045NMD driver on your laptop, you can connect your Bluetooth device to your laptop using the driver. This can help you to use your Bluetooth device with your laptop, such as listening to music, making calls, typing, or gaming. Here are the steps to connect your Bluetooth device to your laptop using BCM92045NMD driver:</p>
112
- <p></p>
113
- <ol>
114
- <li>Turn on your Bluetooth device and make sure that it is in pairing mode. You can check the manual or the website of your device for instructions on how to do so.</li>
115
- <li>Press Windows + I keys on your keyboard and select Devices from the settings menu.</li>
116
- <li>Click on Bluetooth & other devices from the left pane.</li>
117
- <li>Click on Add Bluetooth or other device from the right pane.</li>
118
- <li>Select Bluetooth from the list of options.</li>
119
- <li>Wait for Windows to scan for and display a list of available devices.</li>
120
- <li>Find your Bluetooth device and click on it.</li>
121
- <li>If prompted, enter a PIN code or confirm a pairing request on your device and on your laptop.</li>
122
- <li>Wait for Windows to connect your device to your laptop.</li>
123
- <li>You can now use your Bluetooth device with your laptop.</li>
124
- </ol>
125
-
126
- <h2>How to Check the Status and Information of Your BCM92045NMD Driver</h2>
127
-
128
- <p>If you want to check the status and information of your BCM92045NMD driver on your laptop, you can do so by using a device manager or a driver information tool. This can help you to see if your driver is working properly, and what version and date it has. Here are
129
- the steps to check the status and information of your BCM92045NMD driver using a device manager:</p>
130
-
131
- <ol>
132
-
133
- <li>Press Windows + X keys on your keyboard and select Device Manager from the menu.</li>
134
-
135
- <li>Expand the Bluetooth category and find your BCM92045NMD (BRCM1018) device.</li>
136
-
137
- <li>Right-click on it and select Properties from the menu.</li>
138
-
139
- <li>Click on the General tab to see the status and description of your device.</li>
140
-
141
- <li>Click on the Driver tab to see the driver provider, date, version, and digital signer of your driver.</li>
142
-
143
- <li>You can also click on Update Driver, Roll Back Driver, Disable Device, or Uninstall Device buttons to perform different actions on your driver.</li>
144
-
145
- </ol>
146
-
147
- <p>If you want to use a driver information tool instead of a device manager, you can download and install a reputable tool such as DriverView, DriverEasy, or DriverIdentifier. These tools can scan your laptop for all drivers and display detailed information about them. Here are
148
- the steps to check the status and information of your BCM92045NMD driver using a driver information tool:</p>
149
-
150
- <ol>
151
-
152
- <li>Download and install a driver information tool of your choice from its official website.</li>
153
-
154
- <li>Launch the tool and click on scan or view button.</li>
155
-
156
- <li>Wait for the tool to scan your laptop for all drivers and display a list of them.</li>
157
-
158
- <li>Find your BCM92045NMD (BRCM1018) device and click on it.</li>
159
-
160
- <li>You can see various information about your driver, such as name, description, version, date, manufacturer, location, file name, size, type, status, and more.</li>
161
-
162
- <li>You can also click on different buttons or links to perform different actions on your driver, such as update, backup, restore, uninstall, or export.</li>
163
-
164
- </ol>
165
- <h2>Conclusion</h2>
166
- <p>The BCM92045NMD (BRCM1018) is a Bluetooth module that is installed in many older HP and Lenovo laptops, and allows you to connect your laptop to other Bluetooth devices. However, you may need to download and install the latest BCM92045NMD driver for your laptop, and update it regularly, to ensure that your Bluetooth module works properly and efficiently. In this article, we showed you how to download, install, update, uninstall, backup, restore, connect, and troubleshoot the BCM92045NMD driver for your laptop, using different methods and tools. We hope that this article was helpful for you, and that you were able to fix any issues with your Bluetooth module and driver. If you have any questions or comments, please feel free to leave them below.</p> 3cee63e6c2<br />
167
- <br />
168
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/English Vinglish Full Movie Download WorldHigh Quality Free4u 23.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>English Vinglish Full Movie Download Worldfree4u 23</h2><br /><p><b><b>Download</b> &#128279; <a href="https://imgfil.com/2uxYIO">https://imgfil.com/2uxYIO</a></b></p><br /><br />
2
-
3
- Salaam Namaste Hindi Movie Online - Saif Ali Khan, Preity Zinta, Arshad Warsi and ... Amazon.com: Fanaa Bollywood DVD With English Subtitles: Aamir Khan, Kajol, Yash Chopra, Kunal ... English Vinglish - really lovely, heartwarming movie. 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Among Us The Most Popular Game of 2023 Now Available for Android 5.1.1.md DELETED
@@ -1,150 +0,0 @@
1
-
2
- <h1>Download Among Us Android 5.1.1: How to Play the Popular Game on Your Phone</h1>
3
- <p>Among Us is one of the most popular games of 2022 and 2023, with millions of players around the world enjoying its thrilling and hilarious gameplay. If you want to join the fun, you can download Among Us Android 5.1.1 on your phone and play it anytime, anywhere. In this article, we will show you how to download and install the game, how to play it, and some tips and tricks to make your experience even better.</p>
4
- <h2>What is Among Us?</h2>
5
- <h3>A brief introduction to the game and its features</h3>
6
- <p>Among Us is a multiplayer game that can be played online or over local WiFi with 4-15 players. The game is set in a spaceship, where you can choose to be either a crewmate or an impostor. As a crewmate, your goal is to complete tasks around the ship and find out who the impostor is before they kill everyone. As an impostor, your goal is to kill crewmates, sabotage the ship, and avoid being caught.</p>
7
- <h2>download among us android 5.1.1</h2><br /><p><b><b>Download</b> ===== <a href="https://urlin.us/2uSZb4">https://urlin.us/2uSZb4</a></b></p><br /><br />
8
- <p>The game has different modes, maps, roles, and settings that you can customize according to your preferences. You can also change your character's appearance, name, color, hat, pet, and skin. The game is fun, easy to play, and suitable for all ages.</p>
9
- <h3>Why is it so popular?</h3>
10
- <p>Among Us became a viral sensation in late 2022, thanks to its unique gameplay, social interaction, and meme potential. The game is highly addictive, as each round is different and unpredictable. You never know who the impostor is, who you can trust, or what will happen next.</p>
11
- <p>The game is also very entertaining, as you can chat with other players using text or voice, accuse each other of being the impostor, lie, bluff, joke, or cooperate. The game can create hilarious moments, tense situations, and dramatic twists that will keep you hooked.</p>
12
- <p>The game is also very accessible, as it can be played on various devices, such as PC, iOS, Android, Nintendo Switch, Xbox One, PlayStation 4/5 etc., with cross-platform compatibility. You can play with your friends or strangers from anywhere in the world.</p>
13
- <h2>How to download Among Us Android 5.1.1</h2>
14
- <h3>Requirements and compatibility</h3>
15
- <p>To download Among Us Android 5.1.1 on your phone, you need to have an Android device that runs on Android 5.0 or higher (Lollipop) and has at least 250 MB of free storage space. The game is compatible with most Android devices that meet these requirements.</p>
16
- <h3>Steps to download and install the game from Google Play Store</h3>
17
- <p>The easiest way to download Among Us Android 5.1.1 on your phone is to use the Google Play Store app on your device. Here are the steps:</p>
18
- <ol>
19
- <li>Open the Google Play Store app on your phone.</li>
20
- <li>Search for "Among Us" in the search bar.</li>
21
- <li>Select the game from the results and tap on "Install".</li>
22
- <li>Wait for the game to download and install on your phone.</li>
23
- <li>Once the installation is complete, tap on "Open" to launch the game.</li>
24
- </ol>
25
- <p>Congratulations, you have successfully downloaded Among Us Android 5.1.1 on your phone. You can now enjoy playing the game with your friends or other players online.</p>
26
- <h3>Steps to download and install the game from APK file</h3>
27
- <p>If you cannot access the Google Play Store app on your phone, or if you want to download a different version of the game, you can use an APK file instead. An APK file is a package file that contains the installation files of an Android app. Here are the steps:</p>
28
- <p>How to download among us on android 5.1.1<br />
29
- Among us apk download for android 5.1.1<br />
30
- Download among us mod menu for android 5.1.1<br />
31
- Among us game download free for android 5.1.1<br />
32
- Download among us latest version for android 5.1.1<br />
33
- Among us download link for android 5.1.1<br />
34
- Download among us hack for android 5.1.1<br />
35
- Among us download size for android 5.1.1<br />
36
- Download among us offline for android 5.1.1<br />
37
- Among us download pc and android 5.1.1<br />
38
- Download among us update for android 5.1.1<br />
39
- Among us download play store for android 5.1.1<br />
40
- Download among us online for android 5.1.1<br />
41
- Among us download error for android 5.1.1<br />
42
- Download among us beta for android 5.1.1<br />
43
- Among us download without google play for android 5.1.1<br />
44
- Download among us airship map for android 5.1.1<br />
45
- Among us download from website for android 5.1.1<br />
46
- Download among us no ads for android 5.1.1<br />
47
- Among us download with friends for android 5.1.1<br />
48
- Download among us cracked for android 5.1.1<br />
49
- Among us download in laptop for android 5.1.1<br />
50
- Download among us skins for android 5.1.1<br />
51
- Among us download uptodown for android 5.1.1<br />
52
- Download among us voice chat for android 5.1.1<br />
53
- Among us download windows 10 and android 5.1.1<br />
54
- Download among us pets for android 5.1.1<br />
55
- Among us download apkpure for android 5.1.1<br />
56
- Download among us costumes for android 5.1.1<br />
57
- Among us download chromebook and android 5.1.1<br />
58
- Download among us hats for android 5.1.1<br />
59
- Among us download apk mirror for android 5.1.1<br />
60
- Download among us roles for android 5.1.1<br />
61
- Among us download mac and android 5.1.1<br />
62
- Download among us wallpapers for android 5.1.1<br />
63
- Among us download happy mod for android 5.1.</p>
64
- <ol>
65
- <li>Go to a trusted website that provides APK files for Android apps, such as APKPure, APKMirror, or Uptodown.</li>
66
- <li>Search for "Among Us" in the website's search bar.</li>
67
- <li>Select the version of the game that you want to download, such as Among Us Android 5.1.1.</li>
68
- <li>Tap on "Download APK" and wait for the file to download on your phone.</li>
69
- <li>Before installing the APK file, you need to enable "Unknown sources" on your phone's settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown sources and toggle it on.</li>
70
- <li>Locate the downloaded APK file on your phone's file manager and tap on it.</li>
71
- <li>Follow the instructions on the screen to install the game on your phone.</li>
72
- <li>Once the installation is complete, tap on "Open" to launch the game.</li>
73
- </ol>
74
- <p>Congratulations, you have successfully downloaded Among Us Android 5.1.1 on your phone using an APK file. You can now enjoy playing the game with your friends or other players online.</p>
75
- <h2>How to play Among Us on Android 5.1.1</h2>
76
- <h3>How to join or host a game online or over local WiFi</h3>
77
- <p>To play Among Us on Android 5.1.1, you need to join or host a game online or over local WiFi. Here are the steps:</p>
78
- <ol>
79
- <li>Launch the game on your phone and tap on "Online" or "Local".</li>
80
- <li>If you want to join a game online, you can either enter a code from a friend or browse public games available in different regions and modes. Tap on a game that suits your preferences and wait for it to start.</li>
81
- <li>If you want to host a game online, you can either create a private game or a public game. Tap on "Create Game" and choose a map, mode, number of players, and other settings. You can also invite your friends by sharing your code with them. Tap on "Start" when you are ready to begin.</li>
82
- <li>If you want to join or host a game over local WiFi, you need to be connected to the same WiFi network as other players. Tap on "Join Game" or "Create Game" and follow the same steps as above.</li>
83
- </ol>
84
- <p>You have successfully joined or hosted a game online or over local WiFi. You can now play Among Us with other players as a crewmate or an impostor.</p>
85
- <h3>How to customize your character and settings</h3>
86
- <p>To make your gameplay more fun and personalized, you can customize your character and settings in Among Us Android 5.1.1. Here are the steps:</p>
87
- <ol>
88
- <li>To customize your character, tap on the laptop icon in the lobby or in-game menu. You can change your name, color, hat, pet, and skin by tapping on the options available. You can also buy more items from the shop using real money or watching ads.</li>
89
- <li>To customize your settings, tap on the gear icon in the main menu or in-game menu. You can change your language, sound effects, music volume, chat type (free chat or quick chat), censor chat (on or off), confirm ejects (on or off), and other options by tapping on them.</li>
90
- </ol>
91
- <p>You have successfully customized your character and settings in Among Us Android 5.1.1. You can now play the game with more style and comfort.</p>
92
- <h3>How to communicate with other players using chat or voice</h3>
93
- <p>To communicate with other players in Among Us Android 5.1.1, you can use chat or voice features in the game. Here are the steps:</p>
94
- <ol>
95
- <li>To use chat, tap on the chat icon in the lobby or in-game menu. You can type messages using free chat or select pre defined phrases using quick chat. You can also use emojis and stickers to express yourself. You can chat with everyone or only with your team, depending on the game mode and situation.</li>
96
- <li>To use voice, you need to use a third-party app, such as Discord, Zoom, or Skype, to create or join a voice call with other players. You can also use the in-game voice chat feature, which is currently in beta testing and may not work properly. To use the in-game voice chat, tap on the microphone icon in the lobby or in-game menu and grant permission to access your microphone. You can mute or unmute yourself or other players by tapping on their icons.</li>
97
- </ol>
98
- <p>You have successfully communicated with other players using chat or voice in Among Us Android 5.1.1. You can now talk, strategize, accuse, lie, or joke with other players during the game.</p>
99
- <h3>How to complete tasks or sabotage as a crewmate or impostor</h3>
100
- <p>To play your role as a crewmate or impostor in Among Us Android 5.1.1, you need to complete tasks or sabotage the ship. Here are the steps:</p>
101
- <ol>
102
- <li>To complete tasks as a crewmate, tap on the map icon in the upper right corner of the screen. You will see a list of tasks that you need to do and their locations on the map. You can also see yellow exclamation marks on the map that indicate where your tasks are. Tap on the map to close it and go to the task locations. Tap on the task icon to start the task and follow the instructions on the screen to finish it. Some tasks are simple and quick, while others are complex and long. Some tasks are also common or visual, which means that other players can see you doing them or verify that you have done them.</li>
103
- <li>To sabotage as an impostor, tap on the sabotage icon in the lower right corner of the screen. You will see a map of the ship with different icons that represent different sabotage options. You can sabotage doors, lights, communications, oxygen, reactor, or electrical by tapping on their icons. Some sabotages require you to be near them, while others can be done from anywhere. Some sabotages also require two impostors to coordinate, while others can be done by one impostor alone. Sabotaging can help you kill crewmates, create chaos, divert attention, or win the game.</li>
104
- </ol>
105
- <p>You have successfully completed tasks or sabotaged as a crewmate or impostor in Among Us Android 5.1.1. You can now play your role effectively and help your team win the game.</p>
106
- <h2>Tips and tricks for playing Among Us on Android 5.1.1</h2>
107
- <h3>How to use maps, vents, cameras, and other features</h3>
108
- <p>To improve your gameplay and skills in Among Us Android 5.1.1, you can use maps, vents, cameras, and other features in the game. Here are some tips:</p>
109
- <ul>
110
- <li>Use maps to navigate the ship and find your tasks or sabotage locations. You can also use maps to see where other players are and where dead bodies are reported.</li>
111
- <li>Use vents to move around the ship quickly and stealthily as an impostor. You can only use vents that are connected to each other and only when no one is watching you.</li>
112
- <li>Use cameras to monitor other players' activities and movements on certain maps, such as The Skeld or Polus. You can also use cameras to catch impostors venting or killing.</li>
113
- <li>Use other features such as admin table, vitals monitor, door log, security log, etc., to gather information about other players' locations, statuses, behaviors, etc.</li>
114
- </ul>
115
- <p>You have successfully used maps, vents, cameras, and other features in Among Us Android 5.1.1. You can now play smarter and more strategically in the game.</p>
116
- <h3>How to spot and vote out the impostor</h3>
117
- <p>To win as a crewmate in Among Us Android 5.1.1 , you need to spot and vote out the impostor before they kill everyone. Here are some tips:</p>
118
- <ul>
119
- <li>Pay attention to other players' behaviors and movements. Look for signs of suspicious or inconsistent actions, such as lying, faking tasks, venting, killing, etc.</li>
120
- <li>Use your logic and deduction skills to narrow down the suspects and find contradictions or loopholes in their alibis or stories.</li>
121
- <li>Use your communication and persuasion skills to convince other players of your innocence and accuse the impostor. Use chat or voice features to share your evidence, arguments, or opinions.</li>
122
- <li>Use your voting and cooperation skills to vote out the impostor with the majority of the crewmates. Be careful not to vote out an innocent player or skip voting when the impostor has an advantage.</li>
123
- </ul>
124
- <p>You have successfully spotted and voted out the impostor in Among Us Android 5.1.1. You can now win as a crewmate and save the ship.</p>
125
- <h3>How to deceive and kill as the impostor</h3>
126
- <p>To win as an impostor in Among Us Android 5.1.1, you need to deceive and kill the crewmates before they complete their tasks or find you out. Here are some tips:</p>
127
- <ul>
128
- <li>Pay attention to other players' behaviors and movements. Look for opportunities to isolate, kill, or frame them without being seen or caught.</li>
129
- <li>Use your deception and manipulation skills to lie, fake tasks, sabotage, vent, etc., without arousing suspicion or exposing yourself.</li>
130
- <li>Use your communication and persuasion skills to defend yourself and accuse others of being the impostor. Use chat or voice features to deny, divert, or confuse other players.</li>
131
- <li>Use your voting and cooperation skills to vote out the crewmates with the help of your fellow impostor or by tricking the crewmates. Be careful not to vote out your fellow impostor or reveal yourself by voting wrongly.</li>
132
- </ul>
133
- <p>You have successfully deceived and killed as the impostor in Among Us Android 5.1.1. You can now win as an impostor and destroy the ship.</p>
134
- <h2>Conclusion</h2>
135
- <p>In conclusion, Among Us Android 5.1.1 is a fun and exciting game that you can download and play on your phone with your friends or other players online. The game is easy to play, but challenging to master, as you need to use your skills, strategies, and creativity to play your role as a crewmate or an impostor. The game is also very entertaining, as you can chat, joke, accuse, lie, or cooperate with other players during the game. The game is also very customizable, as you can change your character's appearance, settings, modes, maps, roles, etc., according to your preferences. The game is also very accessible, as it can be played on various devices with cross-platform compatibility.</p>
136
- <p>If you want to download Among Us Android 5.1.1 on your phone and play it anytime, anywhere, you can follow the steps in this article to download and install the game from Google Play Store or APK file. You can also follow the steps in this article to play the game, communicate with other players, complete tasks or sabotage as a crewmate or impostor, and use maps, vents, cameras, and other features in the game. You can also use the tips and tricks in this article to improve your gameplay and skills in Among Us Android 5.1.1.</p>
137
- <p>We hope you enjoyed this article and found it helpful and informative. If you have any questions or feedback about Among Us Android 5.1.1 or this article, please feel free to leave a comment below. Thank you for reading and happy gaming!</p>
138
- <h2>FAQs</h2>
139
- <h3>Q: How much does Among Us Android 5.1.1 cost?</h3>
140
- <p>A: Among Us Android 5.1.1 is free to download and play on your phone from Google Play Store or APK file. However, you can also buy some items from the shop using real money or watching ads.</p>
141
- <h3>Q: Is Among Us Android 5.1.1 safe to download and play?</h3>
142
- <p>A: Yes, Among Us Android 5.1.1 is safe to download and play on your phone if you use a trusted source such as Google Play Store or a reputable website that provides APK files for Android apps.</p>
143
- <h3>Q: Can I play Among Us Android 5.1.1 offline?</h3>
144
- <p>A: No, you cannot play Among Us Android 5.1.1 offline on your phone. You need an internet connection to play online or over local WiFi with other players.</p>
145
- <h3>Q: Can I play Among Us Android 5.1.1 with PC or iOS players?</h3>
146
- <p>A: Yes, you can play Among Us Android 5.1.1 with PC or iOS players, as the game has cross-platform compatibility. You just need to join or host a game online using the same code or region as them.</p>
147
- <h3>Q: How can I update Among Us Android 5.1.1 to the latest version?</h3>
148
- <p>A: To update Among Us Android 5.1.1 to the latest version, you need to check for updates on Google Play Store or the website that provides APK files for Android apps. You can also enable automatic updates on your phone's settings to get the latest version of the game as soon as it is available.</p> 197e85843d<br />
149
- <br />
150
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/DRIFT SPIRITS MOD APK Everything You Need to Know.md DELETED
@@ -1,116 +0,0 @@
1
-
2
- <h1>Drift Spirits Mod APK: Enjoy the Ultimate Drifting Experience</h1>
3
- <p>Do you love drifting games? Do you want to feel the thrill of sliding your car around corners at high speeds? If yes, then you should try Drift Spirits, one of the best drifting games for Android devices. And if you want to make your drifting experience even more exciting, you should download Drift Spirits Mod APK, a modified version of the game that gives you unlimited money, gold, cars, and more. In this article, we will tell you everything you need to know about Drift Spirits and Drift Spirits Mod APK, including their features, benefits, and how to download and install them on your device.</p>
4
- <h2>What is Drift Spirits?</h2>
5
- <p>Drift Spirits is a racing simulator game developed by Bandai Namco Entertainment, a famous Japanese game company. The game is dedicated to drifting, a driving technique that involves oversteering your car to make it slide sideways. The game features realistic graphics and physics, over 100 cars from various manufacturers, different modes and challenges, online multiplayer and leaderboards, and more. You can customize your car with various parts, paint jobs, stickers, and decals. You can also compete with other players from around the world in online battles and events. The game is free to play but contains in-app purchases.</p>
6
- <h2>drift spirits mod apk</h2><br /><p><b><b>Download Zip</b> &#10038;&#10038;&#10038; <a href="https://urlin.us/2uSZKS">https://urlin.us/2uSZKS</a></b></p><br /><br />
7
- <h3>Features of Drift Spirits</h3>
8
- <h4>- Realistic graphics and physics</h4>
9
- <p>The game boasts stunning 3D graphics that make you feel like you are driving a real car on a real track. The game also uses a sophisticated physics engine that simulates the behavior of the car based on its speed, weight, traction, suspension, tires, etc. You can see the smoke, sparks, dust, and skid marks as you drift your car. You can also hear the engine sound, tire screech, collision noise, etc.</p>
10
- <h4>- Over 100 cars to choose from</h4>
11
- <p>The game offers a huge collection of cars from various brands such as Toyota, Nissan, Honda, Mazda, Subaru, Mitsubishi, BMW, Mercedes-Benz, Ferrari, Lamborghini, etc. You can find classic cars like AE86 Trueno, Skyline GT-R R34, RX-7 FD3S, etc., as well as modern cars like Supra GR A90, GT-R R35 Nismo Edition 2020 Model Year Spec V Package (N Attack Package), NSX Type R 2020 Model Year Spec V Package (N Attack Package), etc. You can also unlock special cars from anime series like Initial D.</p>
12
- <p>drift spirits mod apk unlimited money<br />
13
- drift spirits mod apk latest version<br />
14
- drift spirits mod apk android 1<br />
15
- drift spirits mod apk offline<br />
16
- drift spirits mod apk no root<br />
17
- drift spirits mod apk free download<br />
18
- drift spirits mod apk 2023<br />
19
- drift spirits mod apk unlimited gold<br />
20
- drift spirits mod apk rexdl<br />
21
- drift spirits mod apk revdl<br />
22
- drift spirits mod apk hack<br />
23
- drift spirits mod apk obb<br />
24
- drift spirits mod apk english version<br />
25
- drift spirits mod apk unlimited nitro<br />
26
- drift spirits mod apk unlimited bp<br />
27
- drift spirits mod apk data<br />
28
- drift spirits mod apk pure<br />
29
- drift spirits mod apk happymod<br />
30
- drift spirits mod apk mega<br />
31
- drift spirits mod apk mediafire<br />
32
- drift spirits mod apk vip<br />
33
- drift spirits mod apk pro<br />
34
- drift spirits mod apk premium<br />
35
- drift spirits mod apk full unlocked<br />
36
- drift spirits mod apk all cars unlocked<br />
37
- drift spirits mod apk high damage<br />
38
- drift spirits mod apk god mode<br />
39
- drift spirits mod apk anti ban<br />
40
- drift spirits mod apk online<br />
41
- drift spirits mod apk 2.1.2<br />
42
- drift spirits mod apk 2.0.0<br />
43
- drift spirits mod apk 1.9.9<br />
44
- drift spirits mod apk 1.8.8<br />
45
- drift spirits mod apk 1.7.7<br />
46
- drift spirits mod apk 1.6.6<br />
47
- drift spirits mod apk 1.5.5<br />
48
- drift spirits mod apk 1.4.4<br />
49
- drift spirits mod apk 1.3.3<br />
50
- drift spirits mod apk 1.2.2<br />
51
- drift spirits mod apk 1.1.1<br />
52
- download drift spirits mod apk for android<br />
53
- download drift spirits mod apk for ios<br />
54
- download drift spirits mod apk for pc<br />
55
- download drift spirits mod apk for laptop<br />
56
- download drift spirits mod apk for windows 10<br />
57
- download drift spirits mod apk for macbook pro<br />
58
- download drift spirits mod apk for chromebook</p>
59
- <h4>- Various modes and challenges</h4>
60
- <p>The game has different modes and challenges for you to enjoy. You can play the Story Mode where you can follow the story of various characters and their drifting adventures. You can also play the Event Mode where you can participate in limited-time events and win rewards. You can also play the Battle Mode where you can challenge other players in online matches. You can also play the Time Attack Mode where you can try to beat your own or other players' records.</p>
61
- <h4>- Online multiplayer and leaderboards</h4>
62
- <p>The game allows you to compete with other players from around the world in online battles and events. You can also join a team or create your own and cooperate with other players. You can also chat with other players and send them stickers and emojis. The game has a ranking system that shows your position and performance in various categories. You can also view the replays of your or other players' drifts and learn from them.</p>
63
- <h2>What is Drift Spirits Mod APK?</h2>
64
- <p>Drift Spirits Mod APK is a modified version of the original Drift Spirits game that gives you some extra benefits and features that are not available in the official version. The mod APK is created by third-party developers who modify the game files and add some codes to unlock some features. The mod APK is not affiliated with or endorsed by Bandai Namco Entertainment or any of its partners.</p>
65
- <h3>Benefits of Drift Spirits Mod APK</h3>
66
- <h4>- Unlimited money and gold</h4>
67
- <p>One of the main benefits of Drift Spirits Mod APK is that it gives you unlimited money and gold, which are the main currencies in the game. You can use them to buy new cars, upgrade your existing cars, customize your cars, buy parts, etc. You don't have to worry about running out of money or gold or spending real money to buy them.</p>
68
- <h4>- All cars unlocked and upgraded</h4>
69
- <p>Another benefit of Drift Spirits Mod APK is that it gives you access to all the cars in the game, including the special and rare ones. You don't have to complete any missions or events or spend any money or gold to unlock them. You can also upgrade your cars to the maximum level without any restrictions or costs. You can enjoy driving any car you want with the best performance and appearance.</p>
70
- <h4>- No ads and no root required</h4>
71
- <p>A third benefit of Drift Spirits Mod APK is that it removes all the ads from the game, which can be annoying and distracting. You can play the game without any interruptions or pop-ups. You also don't need to root your device to install the mod APK, which can be risky and complicated. You can install the mod APK on any Android device without any problems.</p>
72
- <h2>How to download and install Drift Spirits Mod APK?</h2>
73
- <p>If you are interested in downloading and installing Drift Spirits Mod APK on your device, you need to follow some simple steps. However, before you do that, you need to make sure that you have enough storage space on your device, a stable internet connection, and a compatible Android version (4.1 or higher). You also need to enable the installation of apps from unknown sources on your device settings. Here are the steps to download and install Drift Spirits Mod APK:</p>
74
- <h3>Steps to download and install Drift Spirits Mod APK</h3>
75
- <h4>- Download the APK and OBB files from a trusted source</h4>
76
- <p>The first step is to download the APK and OBB files of Drift Spirits Mod APK from a reliable source. You can find many websites that offer these files, but you need to be careful as some of them may contain viruses or malware. You can use this link as an example, but you can also search for other sources if you want.</p>
77
- <h4>- Install the APKMODY Installer app from Google Play or APK file</h4>
78
- <p>The second step is to install the APKMODY Installer app on your device. This app is a tool that helps you install modded games with OBB files easily and safely. You can download it from Google Play Store or from this link if you prefer.</p>
79
- <h4>- Open the APKMODY Installer app and select Install APKs</h4>
80
- <p>The third step is to open the APKMODY Installer app on your device and select Install APKs from the menu. This will allow you to browse your device's storage and find the downloaded APK and OBB files of Drift Spirits Mod APK.</p>
81
- <h4>- Navigate to the location of the downloaded APK and OBB files and select them</h4>
82
- <p>The fourth step is to navigate to the location where you saved the downloaded APK and OBB files of Drift Spirits Mod APK on your device's storage. You can use any file manager app to do this. Once you find them, select them both and tap on Install.</p>
83
- <h4>- Wait for the installation to complete and launch the game</h4>
84
- <p>The fifth and final step is to wait for the installation process to finish. It may take a few minutes depending on your device's speed and performance. Once it is done, you can launch the game from your app drawer or home screen and enjoy it.</p>
85
- <h2>Conclusion</h2>
86
- <p>Drift Spirits is an amazing drifting game that lets you experience the thrill of sliding your car around corners at high speeds. It has realistic graphics and physics, over 100 cars to choose from, various modes and challenges, online multiplayer and leaderboards, and more. You can customize your car with various parts, paint jobs, stickers, and decals. You can also compete with other players from around the world in online battles and events. Drift Spirits Mod APK is a modified version of the original game that gives you unlimited money, gold, cars, and more. You can buy new cars, upgrade your existing cars, customize your cars, etc. without any restrictions or costs. You can also enjoy the game without any ads or root requirements. You can download and install Drift Spirits Mod APK on your device by following some simple steps. We hope you enjoyed this article and found it helpful. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy drifting!</p>
87
- <h3>FAQs</h3>
88
- <p>Here are some frequently asked questions about Drift Spirits and Drift Spirits Mod APK:</p>
89
- <table>
90
- <tr>
91
- <th>Question</th>
92
- <th>Answer</th>
93
- </tr>
94
- <tr>
95
- <td>Is Drift Spirits Mod APK safe to use?</td>
96
- <td>Drift Spirits Mod APK is generally safe to use, as long as you download it from a trusted source and scan it with an antivirus app before installing it. However, there is always a risk of getting banned or losing your data when using modded games, so use it at your own discretion and responsibility.</td>
97
- </tr>
98
- <tr>
99
- <td>Can I play Drift Spirits Mod APK offline?</td>
100
- <td>No, you cannot play Drift Spirits Mod APK offline. The game requires an internet connection to run properly and access all the features. You also need to log in with your Bandai Namco ID or Facebook account to play the game.</td>
101
- </tr>
102
- <tr>
103
- <td>Can I update Drift Spirits Mod APK?</td>
104
- <td>No, you cannot update Drift Spirits Mod APK from the Google Play Store or the official website. If you want to update the game, you need to download and install the latest version of the mod APK from the same source where you got it. You may also need to uninstall the previous version of the mod APK before installing the new one.</td>
105
- </tr>
106
- <tr>
107
- <td>Can I transfer my progress from Drift Spirits to Drift Spirits Mod APK or vice versa?</td>
108
- <td>No, you cannot transfer your progress from Drift Spirits to Drift Spirits Mod APK or vice versa. The mod APK uses a different server and database than the original game, so they are not compatible with each other. If you want to switch between the two versions of the game, you need to start from scratch.</td>
109
- </tr>
110
- <tr>
111
- <td>Can I play Drift Spirits Mod APK with my friends who are using the original game?</td>
112
- <td>No, you cannot play Drift Spirits Mod APK with your friends who are using the original game. The mod APK uses a different server and database than the original game, so they are not compatible with each other. If you want to play with your friends, you need to use the same version of the game.</td>
113
- </tr>
114
- </table></p> 197e85843d<br />
115
- <br />
116
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Descubre el nuevo modo de juego de Stickman duelista supremo y reta a tus amigos en lnea.md DELETED
@@ -1,197 +0,0 @@
1
- <br />
2
- <h1>Descargar Stickman Duelista Supremo: Un juego de acción divertido y loco</h1>
3
- <p>¿Te gustan los juegos de acción con palitos? ¿Te divierte ver cómo se pelean entre sí con diferentes armas y física ragdoll? ¿Te apetece descargar un juego que te ofrezca horas de diversión y desafío? Entonces, te interesará conocer <strong>Stickman Duelista Supremo</strong>, un juego de peleas de palitos que te hará pasar un buen rato.</p>
4
- <h2>descargar stickman duelista supremo</h2><br /><p><b><b>Download</b> &#10037; <a href="https://urlin.us/2uSZIs">https://urlin.us/2uSZIs</a></b></p><br /><br />
5
- <p>En este artículo, te contaremos todo lo que necesitas saber sobre este juego, cómo descargarlo en tu PC, qué opinan los usuarios que lo han probado, qué consejos y trucos hay para jugar mejor y qué características tiene. ¡Sigue leyendo y descubre por qué <strong>Stickman Duelista Supremo</strong> es un juego que no te puedes perder!</p>
6
- <h2>¿Qué es Stickman Duelista Supremo?</h2>
7
- <p><strong>Stickman Duelista Supremo</strong> es un juego de acción desarrollado por Neron's Brother, un estudio independiente que se dedica a crear juegos divertidos y locos con palitos. El juego se lanzó en 2019 para dispositivos Android y desde entonces ha cosechado más de 100 millones de descargas y una puntuación media de 4.6 estrellas en Google Play.</p>
8
- <p>El juego se basa en el concepto de RHG (Rock Hard Gladiators), que consiste en peleas entre personajes animados hechos con palitos. El juego utiliza una física ragdoll, que hace que los movimientos y las reacciones de los personajes sean más realistas y cómicos al mismo tiempo.</p>
9
- <h3>Un juego de peleas de palitos con física ragdoll</h3>
10
- <p>La física ragdoll es una técnica que simula el comportamiento de los cuerpos humanos al ser golpeados o caer al suelo. En lugar de usar animaciones predefinidas, la física ragdoll calcula las fuerzas que actúan sobre cada parte del cuerpo y las hace reaccionar según la gravedad, la inercia y la elasticidad.</p>
11
- <p>Esto hace que las peleas sean más dinámicas e impredecibles, ya que nunca sabes cómo va a reaccionar tu oponente o tú mismo ante un golpe <p>Además, la física ragdoll hace que las peleas sean más divertidas y cómicas, ya que puedes ver cómo los personajes se retuercen, se estiran, se doblan y se desploman de formas muy graciosas. También puedes ver cómo las armas y los objetos del escenario interactúan con los personajes y los afectan de diferentes maneras.</p>
12
- <h3>Un juego con diferentes modos, mapas y armas</h3>
13
- <p><strong>Stickman Duelista Supremo</strong> te ofrece una gran variedad de opciones para que disfrutes de las peleas de palitos a tu gusto. Puedes elegir entre diferentes modos de juego, como el modo campaña, el modo supervivencia, el modo multijugador o el modo personalizado. Cada modo tiene sus propias reglas, objetivos y recompensas.</p>
14
- <p>También puedes elegir entre diferentes mapas, que tienen distintos escenarios, obstáculos y trampas. Algunos mapas son más grandes y abiertos, mientras que otros son más pequeños y cerrados. Algunos mapas tienen elementos interactivos, como botones, palancas o explosivos, que pueden cambiar el curso de la batalla.</p>
15
- <p>Por supuesto, no podemos olvidarnos de las armas, que son una parte esencial del juego. Hay más de 50 armas diferentes que puedes usar para atacar a tus enemigos, desde espadas, hachas y lanzas hasta pistolas, escopetas y rifles. Cada arma tiene sus propias características, como el alcance, el daño, la velocidad y la cadencia. Algunas armas también tienen efectos especiales, como el fuego, el hielo o la electricidad.</p>
16
- <p>descargar stickman duelista supremo apk<br />
17
- descargar stickman duelista supremo mod<br />
18
- descargar stickman duelista supremo para pc<br />
19
- descargar stickman duelista supremo hackeado<br />
20
- descargar stickman duelista supremo gratis<br />
21
- descargar stickman duelista supremo ultima version<br />
22
- descargar stickman duelista supremo online<br />
23
- descargar stickman duelista supremo sin internet<br />
24
- descargar stickman duelista supremo full<br />
25
- descargar stickman duelista supremo mega<br />
26
- descargar juego de stickman duelista supremo<br />
27
- como descargar stickman duelista supremo<br />
28
- donde descargar stickman duelista supremo<br />
29
- descargar e instalar stickman duelista supremo<br />
30
- descargar y jugar stickman duelista supremo<br />
31
- descargar stickman duelist supreme<br />
32
- download stickman duelist supreme apk<br />
33
- download stickman duelist supreme mod<br />
34
- download stickman duelist supreme for pc<br />
35
- download stickman duelist supreme hacked<br />
36
- download stickman duelist supreme free<br />
37
- download stickman duelist supreme latest version<br />
38
- download stickman duelist supreme online<br />
39
- download stickman duelist supreme offline<br />
40
- download stickman duelist supreme full<br />
41
- download stickman duelist supreme mega<br />
42
- download game of stickman duelist supreme<br />
43
- how to download stickman duelist supreme<br />
44
- where to download stickman duelist supreme<br />
45
- download and install stickman duelist supreme<br />
46
- download and play stickman duelist supreme<br />
47
- baixar stickman duelist supreme apk<br />
48
- baixar stickman duelist supreme mod<br />
49
- baixar stickman duelist supreme para pc<br />
50
- baixar stickman duelist supreme hackeado<br />
51
- baixar stickman duelist supreme gratis<br />
52
- baixar stickman duelist supreme ultima versao<br />
53
- baixar stickman duelist supreme online<br />
54
- baixar stickman duelist supreme offline<br />
55
- baixar stickman duelist supreme completo<br />
56
- baixar stickman duelist supreme mega<br />
57
- baixar jogo de stickman duelist supreme<br />
58
- como baixar stickman duelist supreme<br />
59
- onde baixar stickman duelist supreme<br />
60
- baixar e instalar stickman duelist supreme<br />
61
- baixar e jogar stickman duelist supreme</p>
62
- <h3>Un juego que se puede jugar solo o con amigos</h3>
63
- <p>Otra ventaja de <strong>Stickman Duelista Supremo</strong> es que se puede jugar tanto solo como con amigos. Si quieres jugar solo, puedes enfrentarte a la inteligencia artificial del juego, que tiene diferentes niveles de dificultad y personalidad. Puedes jugar al modo campaña, donde tendrás que superar diferentes misiones y desafíos, o al modo supervivencia, donde tendrás que resistir el mayor tiempo posible contra oleadas de enemigos.</p>
64
- <p>Si quieres jugar con amigos, puedes hacerlo de dos formas: online o local. Si juegas online, puedes conectarte con otros jugadores de todo el mundo y competir contra ellos o cooperar con ellos. Puedes jugar al modo multijugador, donde podrás crear o unirte a salas de juego con hasta 6 jugadores, o al modo personalizado, donde podrás crear tus propias reglas y condiciones.</p>
65
- <p>Si juegas local, puedes usar un solo dispositivo o varios dispositivos conectados por wifi o bluetooth. Puedes jugar al modo pantalla dividida, donde podrás compartir la pantalla con otro jugador y controlar cada uno a un personaje, o al modo multicontrolador, donde podrás usar varios dispositivos como mandos para controlar a los personajes en una sola pantalla.</p>
66
- <h2>¿Cómo descargar Stickman Duelista Supremo en PC?</h2>
67
- <p>Aunque <strong>Stickman Duelista Supremo</strong> es un juego diseñado para dispositivos Android, también se puede jugar en PC con la ayuda de un emulador. Un emulador es un programa que te permite ejecutar aplicaciones de Android en tu ordenador. De esta forma, podrás disfrutar del juego con una pantalla más grande, un mejor sonido y un mayor rendimiento.</p>
68
- <p>Hay muchos emuladores de Android disponibles en el mercado, pero nosotros te recomendamos tres opciones: GameLoop, BlueStacks y Google Play. A continuación te explicamos cómo descargar <strong>Stickman Duelista Supremo</strong> en PC usando cada uno de estos emuladores.</p>
69
- <h3>Usando GameLoop, un emulador de Android</h3>
70
- <p>GameLoop es un emulador de Android desarrollado por Tencent Games, una empresa china que se dedica a crear juegos populares como PUBG Mobile o Call of Duty Mobile. GameLoop está especializado en juegos de acción y ofrece una buena experiencia de juego con gráficos fluidos y controles personalizables.</p>
71
- <p>Para descargar <strong>Stickman Duelista Supremo</strong> en PC usando GameLoop, sigue estos pasos:</p>
72
- <ol>
73
- <li>Descarga e instala GameLoop desde su página web oficial <li>Ejecuta GameLoop y busca <strong>Stickman Duelista Supremo</strong> en la barra de búsqueda</li>
74
- <li>Selecciona el juego y haz clic en el botón de instalar</li>
75
- <li>Espera a que se complete la descarga y la instalación</li>
76
- <li>Abre el juego y disfruta de la acción</li>
77
- </ol>
78
- <h3>Usando BlueStacks, otro emulador de Android</h3>
79
- <p>BlueStacks es otro emulador de Android muy popular y usado por millones de usuarios. BlueStacks te permite jugar a cualquier juego o aplicación de Android en tu PC con una buena calidad y rendimiento. Además, tiene funciones adicionales como el modo multiventana, el mapeo de teclas o la grabación de pantalla.</p>
80
- <p>Para descargar <strong>Stickman Duelista Supremo</strong> en PC usando BlueStacks, sigue estos pasos:</p>
81
- <ol>
82
- <li>Descarga e instala BlueStacks desde su página web oficial</li>
83
- <li>Ejecuta BlueStacks y accede a tu cuenta de Google</li>
84
- <li>Busca <strong>Stickman Duelista Supremo</strong> en la tienda de Google Play que se encuentra dentro del emulador</li>
85
- <li>Selecciona el juego y haz clic en el botón de instalar</li>
86
- <li>Espera a que se complete la descarga y la instalación</li>
87
- <li>Abre el juego y diviértete con las peleas de palitos</li>
88
- </ol>
89
- <h3>Usando Google Play, la tienda oficial de aplicaciones de Android</h3>
90
- <p>Google Play es la tienda oficial de aplicaciones de Android, donde puedes encontrar millones de juegos y aplicaciones para tu dispositivo móvil. Google Play también tiene una versión web, que te permite acceder a la tienda desde tu navegador y descargar las aplicaciones en tu PC. Sin embargo, para poder usar esta opción, necesitas tener un dispositivo Android vinculado a tu cuenta de Google.</p>
91
- <p>Para descargar <strong>Stickman Duelista Supremo</strong> en PC usando Google Play, sigue estos pasos:</p>
92
- <ol>
93
- <li>Accede a la página web de Google Play desde tu navegador</li>
94
- <li>Inicia sesión con tu cuenta de Google</li>
95
- <li>Busca <strong>Stickman Duelista Supremo</strong> en la barra de búsqueda</li>
96
- <li>Selecciona el juego y haz clic en el botón de instalar</li>
97
- <li>Elije el dispositivo Android al que quieres enviar el juego (debe estar conectado a internet)</li>
98
- <li>Espera a que se complete la descarga y la instalación en tu dispositivo Android</li>
99
- <li>Copia el archivo APK del juego desde tu dispositivo Android a tu PC usando un cable USB o una conexión inalámbrica</li>
100
- <li>Ejecuta el archivo APK en tu PC usando un emulador como GameLoop o BlueStacks</li>
101
- <li>Lanza el juego y prepárate para la acción</li>
102
- </ol>
103
- <h2>¿Qué opinan los usuarios de Stickman Duelista Supremo?</h2>
104
- <p><strong>Stickman Duelista Supremo</strong> es un juego que ha recibido muchas opiniones positivas por parte de los usuarios que lo han jugado. La mayoría de los usuarios coinciden en que es un juego muy divertido, adictivo y desafiante, que ofrece una gran variedad de opciones y posibilidades. Sin embargo, también hay algunos usuarios que han encontrado algunos problemas o aspectos mejorables en el juego. A continuación te mostramos las ventajas y desventajas del juego según las reseñas de los usuarios.</p>
105
- <h3>Las ventajas del juego según las reseñas</h3>
106
- <p>Estas son algunas de las ventajas del juego que más han destacado los usuarios en sus reseñas:</p>
107
- <ul>
108
- <li><strong>La física ragdoll:</strong> muchos usuarios han alabado la física ragdoll del juego, que hace que las peleas sean más realistas y cómicas al mismo tiempo. Los usuarios han disfrutado viendo cómo los personajes se mueven y reaccionan ante los golpes y las caídas.</li>
109
- <li><strong>Los modos de juego:</strong> muchos usuarios han valorado positivamente los diferentes modos de juego que ofrece el juego, como el modo campaña, el modo supervivencia, el modo multijugador o el modo personalizado. Los usuarios han apreciado la variedad y la diversión que proporcionan cada uno de estos modos.</li>
110
- <li><strong <li><strong>Las armas y los mapas:</strong> muchos usuarios han elogiado las armas y los mapas que ofrece el juego, que son muy variados y originales. Los usuarios han disfrutado probando las diferentes armas y sus efectos, así como explorando los diferentes mapas y sus obstáculos.</li>
111
- <li><strong>El multijugador:</strong> muchos usuarios han destacado el multijugador como uno de los puntos fuertes del juego, ya que les permite jugar con sus amigos o con otros jugadores de todo el mundo. Los usuarios han valorado la posibilidad de crear o unirse a salas de juego, así como la opción de jugar en pantalla dividida o con varios dispositivos.</li>
112
- <li><strong>El editor de mapas y personajes:</strong> muchos usuarios han aplaudido el editor de mapas y personajes que tiene el juego, que les permite crear sus propios escenarios y personajes personalizados. Los usuarios han apreciado la libertad y la creatividad que les brinda esta función.</li>
113
- </ul>
114
- <h3>Las desventajas del juego según las reseñas</h3>
115
- <p>Estas son algunas de las desventajas del juego que más han mencionado los usuarios en sus reseñas:</p>
116
- <ul>
117
- <li><strong>Los anuncios:</strong> muchos usuarios se han quejado de los anuncios que aparecen en el juego, que son muy frecuentes e intrusivos. Los usuarios han expresado su molestia por tener que ver anuncios cada vez que terminan una partida o quieren acceder a alguna opción.</li>
118
- <li><strong>Los bugs:</strong> muchos usuarios han reportado bugs o errores en el juego, que afectan al funcionamiento o al rendimiento del mismo. Algunos ejemplos de bugs son: el juego se cierra solo, el juego se queda congelado, el juego no reconoce los controles, el juego no se conecta al multijugador, etc.</li>
119
- <li><strong>La dificultad:</strong> muchos usuarios han encontrado el juego demasiado difícil o injusto, especialmente en el modo campaña o contra los jefes. Los usuarios han criticado que los enemigos son muy fuertes y rápidos, que tienen armas muy poderosas y que tienen ventaja en algunos mapas.</li>
120
- <li><strong>La monotonía:</strong> algunos usuarios han opinado que el juego se vuelve monótono o aburrido después de un tiempo, ya que no hay mucha variedad o novedad en las peleas. Algunos usuarios han sugerido que se añadan más modos de juego, más armas, más mapas o más opciones de personalización.</li>
121
- </ul>
122
- <h3>La puntuación media del juego en diferentes plataformas</h3>
123
- <p>A pesar de las desventajas mencionadas, <strong>Stickman Duelista Supremo</strong> es un juego que ha recibido una puntuación media muy alta en diferentes plataformas. Estas son algunas de las puntuaciones que ha obtenido el juego en distintos sitios web:</p>
124
- <table>
125
- <tr>
126
- <th>Plataforma</th>
127
- <th>Puntuación</th>
128
- </tr>
129
- <tr>
130
- <td>Google Play</td>
131
- <td>4.6 / 5</td>
132
- </tr>
133
- <tr>
134
- <td>App Store</td>
135
- <td>No disponible</td>
136
- </tr>
137
- <tr>
138
- <td>Aptoide</td>
139
- <td>4.5 / 5</td>
140
- </tr>
141
- <tr>
142
- <td>MALAVIDA</td>
143
- <td>9 / 10</td>
144
- </tr>
145
- <tr>
146
- <td>Juegos Friv 2020</td>
147
- <td>4.2 / 5</td>
148
- </tr>
149
- </table>
150
- <h2>¿Qué consejos y trucos hay para jugar a Stickman Duelista Supremo?</h2>
151
- <p><strong>Stickman Duelista Supremo</strong> es un juego que requiere de habilidad, estrategia y reflejos para ganar las peleas. Si quieres mejorar tu nivel y vencer a tus rivales, te recomendamos seguir estos consejos y trucos:</p>
152
- <h3>Usar un arma que se adapte a tu estilo de juego</h3>
153
- <p>Cada arma tiene sus ventajas y desventajas, por lo que debes elegir la que mejor se adapte a tu forma de jugar. Por ejemplo, si te gusta atacar desde lejos y con precisión, puedes usar un rifle de francotirador o un arco. Si te gusta atacar desde cerca y con fuerza, puedes usar una espada o una mot sierra. Si te gusta atacar con rapidez y sorpresa, puedes usar una pistola o un cuchillo.</p>
154
- <h3>Practicar con bots difíciles para mejorar tus habilidades</h3>
155
- <p>Una buena forma de mejorar tus habilidades es practicar con bots difíciles, que te pondrán a prueba y te harán aprender de tus errores. Puedes jugar al modo supervivencia o al modo personalizado y elegir el nivel de dificultad de los bots, desde fácil hasta extremo. Así podrás entrenar tu puntería, tu esquiva, tu movimiento y tu estrategia.</p>
156
- <h3>Aprovechar las opciones de gravedad, KO instantáneo y escudo de energía</h3>
157
- <p>Otra forma de mejorar tu juego es aprovechar las opciones que te ofrece el juego, como la gravedad, el KO instantáneo y el escudo de energía. Estas opciones pueden cambiar el resultado de una pelea si las usas bien. Por ejemplo, puedes usar la gravedad para hacer que los objetos caigan sobre tus enemigos o para saltar más alto. Puedes usar el KO instantáneo para acabar con tus enemigos de un solo golpe o para evitar que te maten. Puedes usar el escudo de energía para protegerte de los ataques o para empujar a tus enemigos.</p>
158
- <h2>¿Qué características tiene Stickman Duelista Supremo?</h2>
159
- <p><strong>Stickman Duelista Supremo</strong> es un juego que tiene muchas características que lo hacen único y especial. Estas son algunas de las características que tiene el juego:</p>
160
- <h3>Un juego con gráficos 2D y música épica</h3>
161
- <p>El juego tiene unos gráficos 2D simples pero atractivos, que le dan un estilo propio y original. Los personajes, las armas y los objetos están hechos con palitos, lo que les da un aspecto divertido y caricaturesco. Los escenarios están llenos de detalles y colores, lo que les da un aspecto variado y dinámico. El juego también tiene una música épica que acompaña a las peleas, lo que les da un tono emocionante y dramático.</p>
162
- <h3>Un juego con editor de mapas y personajes personalizables</h3>
163
- <p>El juego tiene un editor de mapas y personajes personalizables, que te permite crear tus propios escenarios y personajes a tu gusto. Puedes elegir entre diferentes elementos, como el suelo, las paredes, los obstáculos, los objetos, las armas, los colores, las formas, etc. Puedes guardar tus creaciones y compartirlas con otros usuarios o jugar con ellas en el modo personalizado.</p>
164
- <h3>Un juego con torneo de jefes y modo mini juego de fútbol</h3>
165
- <p>El juego tiene un torneo de jefes y un modo mini juego de fútbol, que te ofrecen más diversión y desafío. El torneo de jefes consiste en enfrentarte a los jefes más poderosos del juego, que tienen habilidades especiales y armas únicas. El modo mini juego de fútbol consiste en jugar al fútbol con los personajes del juego, usando sus armas como balones.</p>
166
- <h2>Conclusión</h2>
167
- <p><strong>Stickman Duelista Supremo</strong> es un juego de acción divertido y loco, que te hará pasar un buen rato con las peleas de palitos. El juego tiene una física ragdoll que hace que las peleas sean más realistas y cómicas, diferentes modos de juego que te ofrecen variedad y diversión, diferentes mapas y armas que te ofrecen originalidad y posibilidades, un multijugador que te permite jugar con amigos o con otros jugadores de todo el mundo, un editor de mapas y personajes personalizables que te permite crear tus propios escenarios y personajes, un torneo de jefes y un modo mini juego de fútbol que te ofrecen más desafío y diversión.</p>
168
- <p>Si quieres descargar <strong>Stickman Duelista Supremo</strong> en PC, puedes hacerlo usando un emulador de Android como GameLoop, BlueStacks o Google Play. Así podrás disfrutar del juego con una pantalla más grande, un mejor sonido y un mayor rendimiento.</p>
169
- <p>Si quieres mejorar tu nivel y vencer a tus rivales , puedes seguir los consejos y trucos que te hemos dado, como usar un arma que se adapte a tu estilo de juego, practicar con bots difíciles para mejorar tus habilidades y aprovechar las opciones de gravedad, KO instantáneo y escudo de energía.</p>
170
- <p>En definitiva, <strong>Stickman Duelista Supremo</strong> es un juego que te recomendamos descargar y probar si te gustan los juegos de acción con palitos. Te aseguramos que no te arrepentirás y que te divertirás mucho con este juego.</p>
171
- <h2>Preguntas frecuentes</h2>
172
- <p>A continuación, te respondemos a algunas de las preguntas más frecuentes que tienen los usuarios sobre <strong>Stickman Duelista Supremo</strong>:</p>
173
- <ol>
174
- <li><strong>¿Es gratis Stickman Duelista Supremo?</strong></li>
175
- <p>Sí, <strong>Stickman Duelista Supremo</strong> es un juego totalmente gratis que puedes descargar y jugar sin pagar nada. Sin embargo, el juego tiene anuncios y compras integradas que puedes usar para obtener ventajas o personalizar el juego.</p>
176
- <li><strong>¿Es seguro Stickman Duelista Supremo?</strong></li>
177
- <p>Sí, <strong>Stickman Duelista Supremo</strong> es un juego seguro que no contiene virus ni malware. Sin embargo, debes tener cuidado con las fuentes desde las que descargas el juego, ya que algunas pueden ser fraudulentas o dañinas. Te recomendamos descargar el juego desde la tienda oficial de Google Play o desde sitios web confiables.</p>
178
- <li><strong>¿Es apto para niños Stickman Duelista Supremo?</strong></li>
179
- <p>No, <strong>Stickman Duelista Supremo</strong> no es un juego apto para niños, ya que contiene violencia, sangre y armas. El juego está clasificado como PEGI 16, lo que significa que no es adecuado para menores de 16 años. Si eres menor de edad, debes consultar con tus padres o tutores antes de jugar a este juego.</p>
180
- <li><strong>¿Qué requisitos tiene Stickman Duelista Supremo?</strong></li>
181
- <p><strong>Stickman Duelista Supremo</strong> es un juego que no tiene unos requisitos muy exigentes, por lo que puede funcionar en la mayoría de los dispositivos Android. Los requisitos mínimos son los siguientes:</p>
182
- <ul>
183
- <li>Sistema operativo: Android 4.4 o superior</li>
184
- <li>Memoria RAM: 1 GB o superior</li>
185
- <li>Espacio de almacenamiento: 100 MB o superior</li>
186
- <li>Conexión a internet: opcional (solo para el multijugador)</li>
187
- </ul>
188
- <li><strong>¿Cómo contactar con el desarrollador de Stickman Duelista Supremo?</strong></li>
189
- <p>Si tienes alguna duda, sugerencia o problema con el juego, puedes contactar con el desarrollador de <strong>Stickman Duelista Supremo</strong>, Neron's Brother, a través de los siguientes medios:</p>
190
- <ul>
191
- <li>Email: [email protected]</li>
192
- <li>Facebook: https://www.facebook.com/neronsbrother/</li>
193
- <li>Instagram: https://www.instagram.com/neronsbrother/</li>
194
- <li>YouTube: https://www.youtube.com/channel/UCJ1Nero6f3cLZfZ4GkMj11g</li>
195
- </ul></p> 197e85843d<br />
196
- <br />
197
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Cleaner How to Remove Unwanted Files and Boost Your System Performance.md DELETED
@@ -1,147 +0,0 @@
1
- <br />
2
- <h1>How to Choose the Best Cleaner for Your Needs</h1>
3
- <p>Cleaners are substances or devices that are used to remove dirt, stains, grease, germs, and other unwanted materials from various surfaces and objects. They are essential for maintaining hygiene, health, beauty, and functionality in our homes, workplaces, and public spaces. However, not all cleaners are created equal. There are many types of cleaners available in the market, each with different purposes, ingredients, benefits, and drawbacks. How do you know which one is best for your needs?</p>
4
- <p>In this article, we will provide you with some information on how to choose the best cleaner for your needs, based on some reliable sources we found on the web. We will also discuss some of the benefits and risks of using cleaners, as well as some tips for using them safely and effectively.</p>
5
- <h2>cleaner</h2><br /><p><b><b>Download Zip</b> &raquo;&raquo;&raquo; <a href="https://jinyurl.com/2uNPf9">https://jinyurl.com/2uNPf9</a></b></p><br /><br />
6
- <h2>How to Choose the Best Cleaner</h2>
7
- <p>Choosing the best cleaner depends on several factors, such as:</p>
8
- <ul>
9
- <li>The type of surface or object you want to clean</li>
10
- <li>The type and level of dirt or stain you want to remove</li>
11
- <li>The safety and environmental impact of the cleaner</li>
12
- <li>The cost and availability of the cleaner</li>
13
- <li>The ease of use and effectiveness of the cleaner</li>
14
- </ul>
15
- <p>Let's look at each factor in more detail.</p>
16
- <h3>The type of surface or object you want to clean</h3>
17
- <p>Different surfaces or objects may require different types of cleaners. For example:</p>
18
- <p>best cleaning company in Sydney<br />
19
- where to find a reliable commercial cleaner<br />
20
- black thin silicon case for iphone xs<br />
21
- how to find a good cleaning company<br />
22
- best and most affordable cleaning company in Sydney<br />
23
- commercial cleaning provider near me<br />
24
- top keywords for cleaning business<br />
25
- how to use a steam cleaner for carpets<br />
26
- natural cleaner for stainless steel appliances<br />
27
- best vacuum cleaner for pet hair and hardwood floors<br />
28
- how to make your own cleaner with vinegar<br />
29
- best cleaner for laminate wood floors<br />
30
- professional cleaner for air ducts<br />
31
- best cleaner for leather sofa<br />
32
- how to hire a cleaner for Airbnb<br />
33
- best cleaner for tile and grout<br />
34
- eco-friendly cleaner for bathroom<br />
35
- best cleaner for oven racks<br />
36
- how to become a cleaner in Australia<br />
37
- best cleaner for windows and mirrors<br />
38
- how to clean a coffee maker with vinegar<br />
39
- best cleaner for granite countertops<br />
40
- how to get rid of mold with bleach<br />
41
- best cleaner for hardwood floors and furniture<br />
42
- how to disinfect a mattress with baking soda<br />
43
- best cleaner for shower doors and walls<br />
44
- how to clean a dishwasher with lemon<br />
45
- best cleaner for car interior and exterior<br />
46
- how to remove stains from carpet with hydrogen peroxide<br />
47
- best cleaner for vinyl plank flooring<br />
48
- how to clean a microwave with vinegar and water<br />
49
- best cleaner for stainless steel cookware<br />
50
- how to polish silver with toothpaste<br />
51
- best cleaner for glass stove top<br />
52
- how to clean a washing machine with baking soda and vinegar<br />
53
- best cleaner for brass and copper<br />
54
- how to clean a dryer vent with a leaf blower<br />
55
- best cleaner for marble countertops and floors<br />
56
- how to clean a toilet with coke and bleach<br />
57
- best cleaner for concrete driveway and patio</p>
58
- <ul>
59
- <li>Hard surfaces such as floors, countertops, tiles, glass, metal, wood, plastic, etc. may need detergents , degreasers , abrasives , or acids . Detergents are synthetic substances that can dissolve oils , hold dirt in suspension , and act as wetting agents . Degreasers are alkaline substances that can remove organic soils such as fats , oils , and proteins . Abrasives are substances that can scrub off hard-to-remove soils by friction . Acids are substances that can dissolve mineral deposits such as lime , rust , soap scum , etc.</li>
60
- <li>Soft surfaces such as carpets , rugs , upholstery , curtains , etc. may need special cleaners designed for fabrics . These cleaners may contain enzymes , solvents , surfactants , or bleaches . Enzymes are biological substances that can break down organic stains such as blood , urine , food , etc. Solvents are liquid substances that can dissolve oily stains such as grease , wax , ink , etc. Surfact <p>Surfactants are substances that can lower the surface tension of water and help remove dirt and stains. Bleaches are substances that can whiten or lighten the color of fabrics by oxidizing or reducing the color molecules.</li>
61
- <li>Electrical or electronic devices such as computers, phones, TVs, etc. may need special cleaners designed for electronics. These cleaners may contain isopropyl alcohol, compressed air, microfiber cloths, or anti-static brushes. Isopropyl alcohol is a solvent that can remove dust, dirt, and fingerprints without damaging the circuitry. Compressed air is a gas that can blow away dust and debris from hard-to-reach areas. Microfiber cloths are soft and lint-free materials that can wipe away dirt and smudges without scratching the surface. Anti-static brushes are tools that can remove dust and static electricity from sensitive components.</li>
62
- </ul>
63
- <p>It is important to use the right type of cleaner for the right type of surface or object, as using the wrong one may cause damage or ineffective cleaning.</p>
64
- <h3>The type and level of dirt or stain you want to remove</h3>
65
- <p>Different types and levels of dirt or stains may require different types and strengths of cleaners. For example:</p>
66
- <ul>
67
- <li>Light or moderate dirt or stains may be removed by mild or general-purpose cleaners such as dish soap, all-purpose cleaner, vinegar, baking soda, etc. These cleaners are usually safe, cheap, and easy to use.</li>
68
- <li>Heavy or stubborn dirt or stains may require stronger or specialized cleaners such as bleach, ammonia, oven cleaner, rust remover, etc. These cleaners are usually more effective, but also more hazardous, expensive, and difficult to use.</li>
69
- <li>Specific types of dirt or stains such as mold, mildew, blood, ink, wine, coffee, etc. may require specific types of cleaners such as fungicide, hydrogen peroxide, rubbing alcohol, stain remover, etc. These cleaners are usually designed to target the particular source or nature of the dirt or stain.</li>
70
- </ul>
71
- <p>It is important to use the right type and strength of cleaner for the right type and level of dirt or stain, as using the wrong one may cause insufficient cleaning or unnecessary harm.</p>
72
- <h3>The safety and environmental impact of the cleaner</h3>
73
- <p>Different cleaners may have different effects on your health and the environment. For example:</p>
74
- <ul>
75
- <li>Natural or organic cleaners such as vinegar, lemon juice, salt, etc. are usually safe for humans and animals, as they are derived from natural sources and do not contain harmful chemicals. They are also biodegradable and eco-friendly, as they do not pollute the water or soil.</li>
76
- <li>Synthetic or chemical cleaners such as bleach, ammonia, chlorine, etc. are usually harmful for humans and animals, as they contain toxic substances that can cause irritation, allergy, poisoning, or cancer. They are also non-biodegradable and eco-unfriendly, as they can contaminate the water or soil.</li>
77
- </ul>
78
- <p>It is important to use the safest and most environmentally friendly cleaner possible for your needs, as using a harmful one may cause health problems or environmental damage.</p>
79
- <h3>The cost and availability of the cleaner</h3>
80
- <p>Different cleaners may have different prices and availability in the market. For example:</p>
81
- <ul>
82
- <li>Common or popular cleaners such as dish soap, all-purpose cleaner, bleach, etc. are usually cheap and easy to find in most stores, as they are widely used and produced by many brands and manufacturers . </li>
83
- <li>Rare or niche cleaners such as enzyme cleaner, silver polish, leather conditioner, etc. are usually expensive and hard to find in most stores, as they are rarely used and produced by few brands and manufacturers . </li>
84
- </ul>
85
- <p>It is important to use the most affordable and accessible cleaner possible for your needs, as using an expensive or scarce one may cause financial burden or inconvenience .</p>
86
- <h3>The ease of use and effectiveness of the cleaner</h3>
87
- <p>Different cleaners may have different levels of ease of use and effectiveness in cleaning. For example:</p>
88
- <ul>
89
- <li>Simple or convenient cleaners such as wipes, sprays, pods, etc. are usually easy to use and effective in cleaning, as they require minimal preparation and application . </li>
90
- <li>Complex or cumbersome cleaners such as powders, liquids, gels, etc. are usually difficult to use and less effective in cleaning, as they require more preparation and application . </li>
91
- </ul>
92
- <p>It is important to use the easiest and most effective cleaner possible for your needs, as using a difficult or less effective one may cause frustration or dissatisfaction .</p>
93
- <h2>Benefits of Using Cleaners</h2 <p>Using cleaners can have many benefits for you and your surroundings. Some of the benefits are:</p>
94
- <ul>
95
- <li>Reduce stress and improve mental health by creating a clean and organized environment. Studies have shown that clutter and dirt can increase anxiety, depression, and fatigue, while cleanliness and order can enhance mood, productivity, and creativity .</li>
96
- <li>Improve physical health by preventing germs, allergens, and infections. Cleaners can kill or remove harmful microorganisms such as bacteria, viruses, fungi, and parasites that can cause diseases such as colds, flu, food poisoning, skin infections, etc. They can also reduce or eliminate allergens such as dust mites, pollen, pet dander, etc. that can trigger allergic reactions such as asthma, hay fever, eczema, etc.</li>
97
- <li>Enhance appearance and functionality of surfaces and objects by removing dirt and stains. Cleaners can restore or maintain the original color, shine, texture, and shape of various surfaces and objects such as floors, countertops, furniture, clothes, etc. They can also improve or preserve the performance, durability, and efficiency of various devices and appliances such as computers, phones, TVs, etc.</li>
98
- <li>Save time and money by avoiding repairs and replacements. Cleaners can prevent or delay the wear and tear of surfaces and objects by removing corrosive or abrasive substances such as rust, lime, grease, etc. They can also extend the lifespan or warranty of devices and appliances by removing dust or debris that can cause overheating or malfunctioning.</li>
99
- </ul>
100
- <h2>Risks of Using Cleaners</h2>
101
- <p>Using cleaners can also have some risks for you and your surroundings. Some of the risks are:</p>
102
- <ul>
103
- <li>Harm health by causing irritation, allergy, poisoning, or cancer. Cleaners can contain or produce hazardous chemicals such as bleach, ammonia, chlorine, formaldehyde, etc. that can irritate or damage the skin, eyes, nose, throat, lungs, etc. They can also cause allergic reactions such as rashes, hives, swelling, etc. in some people who are sensitive to certain ingredients. They can also cause poisoning or cancer if ingested or inhaled in large amounts or over a long period of time.</li>
104
- <li>Damage surfaces or objects by causing corrosion, discoloration, or deterioration . Cleaners can react or interact with certain materials such as metal, wood, plastic, etc. and cause them to rust, fade, crack, etc. They can also leave behind residues or marks that can affect the appearance or quality of the surface or object . </li>
105
- <li>Harm environment by causing pollution, waste, or depletion of resources . Cleaners can contaminate or pollute the water, soil, or air with harmful chemicals or microplastics that can harm the wildlife or ecosystem . They can also generate or contribute to the waste or landfill problem by using non-recyclable or non-biodegradable packaging or materials . They can also consume or deplete the natural resources such as water, oil, gas, etc. that are used to produce them.</li>
106
- </ul>
107
- <h2>Tips for Using Cleaners Safely and Effectively</h2>
108
- <p>To minimize the risks and maximize the benefits of using cleaners, you should follow some tips for using them safely and effectively. Some of the tips are:</p>
109
- <ul>
110
- <li>Read and follow the instructions and warnings on the label. The label will tell you how to use, store, and dispose of the cleaner properly. It will also tell you what precautions to take, what hazards to avoid, and what to do in case of an emergency.</li>
111
- <li>Wear protective gear such as gloves, goggles, and masks. These will protect your skin, eyes, and respiratory system from exposure to harmful chemicals or substances.</li>
112
- <li>Use the right amount and method of application. Using too much or too little cleaner may cause ineffective cleaning or unnecessary harm. Using the wrong method of application may cause uneven cleaning or damage to the surface or object. Follow the directions on the label or use common sense to determine the best amount and method of application for your needs.</li>
113
- <li>Test the cleaner on a small or hidden area before using it on a large or visible area. This will help you check if the cleaner is suitable for the surface or object you want to clean. It will also help you avoid any unwanted reactions or effects such as discoloration , damage , or allergy .</li>
114
- <li>Rinse and dry the surface or object after cleaning. This will help you remove any excess cleaner , dirt , or residue that may remain on the surface or object . It will also help you prevent any further reactions or effects such as corrosion, irritation, or odor.</li>
115
- </ul>
116
- <h2>Conclusion</h2>
117
- <p>Cleaners are useful and important for keeping our surroundings clean and healthy. However, they are not all the same. There are many types of cleaners available in the market, each with different purposes, ingredients, benefits, and drawbacks. To choose the best cleaner for your needs, you should consider the following factors:</p>
118
- <ul>
119
- <li>The type of surface or object you want to clean</li>
120
- <li>The type and level of dirt or stain you want to remove</li>
121
- <li>The safety and environmental impact of the cleaner</li>
122
- <li>The cost and availability of the cleaner</li>
123
- <li>The ease of use and effectiveness of the cleaner</li>
124
- </ul>
125
- <p>You should also follow some tips for using cleaners safely and effectively, such as reading and following the instructions and warnings on the label, wearing protective gear, using the right amount and method of application, testing the cleaner on a small or hidden area, and rinsing and drying the surface or object after cleaning.</p>
126
- <p>By choosing and using cleaners wisely, you can enjoy the benefits of having a clean and healthy environment, while avoiding the risks of harming yourself or your surroundings.</p>
127
- <h2>FAQs</h2>
128
- <p>Here are some frequently asked questions about cleaners:</p>
129
- <h3>What is the difference between cleaning and disinfecting?</h3>
130
- <p>Cleaning is the process of removing dirt, dust, stains, and other visible impurities from surfaces or objects. Disinfecting is the process of killing or inactivating germs, such as bacteria, viruses, fungi, and parasites, that can cause diseases. Cleaning does not necessarily disinfect, and disinfecting does not necessarily clean. Therefore, it is recommended to do both cleaning and disinfecting for optimal hygiene.</p>
131
- <h3>What are some natural or homemade cleaners?</h3>
132
- <p>Some natural or homemade cleaners are vinegar, lemon juice, baking soda, salt, hydrogen peroxide, tea tree oil, etc. These substances can be used alone or in combination to clean various surfaces or objects. They are usually safe, cheap, and eco-friendly. However, they may not be as effective as synthetic or chemical cleaners for some types of dirt or stains. They may also have some drawbacks such as unpleasant smell, limited shelf life, or potential reactions with certain materials.</p>
133
- <h3>How do I dispose of cleaners properly?</h3>
134
- <p>To dispose of cleaners properly, you should follow the instructions and warnings on the label. Some cleaners can be poured down the drain with plenty of water. Some cleaners need to be neutralized with another substance before disposal. Some cleaners need to be taken to a hazardous waste collection site or facility. You should never mix different cleaners together, as they may cause dangerous reactions. You should also recycle or reuse the containers or packaging of the cleaners if possible.</p>
135
- <h3>How do I store cleaners safely?</h3>
136
- <p>To store cleaners safely, you should keep them in their original containers with their labels intact. You should keep them away from children, pets, food, heat, fire, or sunlight. You should store them in a cool, dry, and well-ventilated place. You should also keep them separate from each other, especially those that may react with each other.</p>
137
- <h3>How do I make cleaners more effective?</h3>
138
- <p>To make cleaners more effective, you should follow some tips such as:</p>
139
- <ul>
140
- <li>Pre-treat the surface or object with a cleaner before cleaning. This will help loosen or dissolve the dirt or stain and make it easier to remove.</li>
141
- <li>Use hot water or steam to clean. This will help activate or enhance the cleaning power of the cleaner and kill or remove more germs.</li>
142
- <li>Use a brush, sponge, cloth, or other tool to scrub or wipe the surface or object. This will help remove more dirt or stain by friction or absorption.</li>
143
- <li>Use a timer or stopwatch to monitor the cleaning time. This will help you follow the recommended contact time or dwell time of the cleaner for optimal results.</li>
144
- <li>Use a rinse aid or a dryer to finish the cleaning. This will help prevent water spots, streaks, or residues from forming on the surface or object.</li>
145
- </ul></p>
146
- <br />
147
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/layers_123821KB.py DELETED
@@ -1,118 +0,0 @@
1
- import torch
2
- from torch import nn
3
- import torch.nn.functional as F
4
-
5
- from . import spec_utils
6
-
7
-
8
class Conv2DBNActiv(nn.Module):
    """Conv2d -> BatchNorm2d -> activation, applied as one unit.

    Args:
        nin: number of input channels.
        nout: number of output channels.
        ksize: convolution kernel size.
        stride: convolution stride.
        pad: zero padding added to both sides of the input.
        dilation: spacing between kernel elements.
        activ: activation-module class, instantiated with no arguments.
    """

    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(Conv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nout,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                bias=False,  # bias is redundant right before BatchNorm
            ),
            nn.BatchNorm2d(nout),
            activ(),
        )

    def forward(self, x):
        # Defining forward() (instead of overriding __call__) keeps
        # nn.Module's __call__ machinery (hooks, tracing) working.
        # Call sites using module(x) are unaffected.
        return self.conv(x)
27
-
28
-
29
class SeperableConv2DBNActiv(nn.Module):
    """Depthwise-separable convolution -> BatchNorm2d -> activation.

    A depthwise conv (groups=nin) followed by a 1x1 pointwise projection,
    then BatchNorm and the activation. The class name keeps the original
    "Seperable" spelling so existing callers and checkpoints keep working.

    Args:
        nin: number of input channels (also depthwise groups).
        nout: number of output channels of the pointwise conv.
        ksize: depthwise kernel size.
        stride: depthwise stride.
        pad: depthwise padding.
        dilation: depthwise dilation.
        activ: activation-module class, instantiated with no arguments.
    """

    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
        super(SeperableConv2DBNActiv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(
                nin,
                nin,
                kernel_size=ksize,
                stride=stride,
                padding=pad,
                dilation=dilation,
                groups=nin,  # one filter per channel: depthwise conv
                bias=False,
            ),
            nn.Conv2d(nin, nout, kernel_size=1, bias=False),  # pointwise mix
            nn.BatchNorm2d(nout),
            activ(),
        )

    def forward(self, x):
        # forward() (not __call__) so nn.Module hooks still run;
        # module(x) call sites are unchanged.
        return self.conv(x)
50
-
51
-
52
class Encoder(nn.Module):
    """Encoder stage: two Conv2DBNActiv layers producing (downsampled, skip).

    The first convolution always uses stride 1 and its output is returned as
    the skip-connection tensor; the second applies the configured stride and
    yields the (possibly downsampled) feature map.

    Args:
        nin: input channels.
        nout: output channels of both convolutions.
        ksize: kernel size of both convolutions.
        stride: stride of the second convolution (controls downsampling).
        pad: padding of both convolutions.
        activ: activation-module class.
    """

    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
        super(Encoder, self).__init__()
        self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)

    def forward(self, x):
        # forward() (not __call__) so nn.Module hooks still run;
        # encoder(x) call sites are unchanged.
        skip = self.conv1(x)
        h = self.conv2(skip)

        return h, skip
63
-
64
-
65
class Decoder(nn.Module):
    """Decoder stage: 2x bilinear upsample, optional skip fusion, then conv.

    Args:
        nin: input channels of the convolution (i.e. channels after
            concatenating the skip tensor, when one is supplied).
        nout: output channels.
        ksize: convolution kernel size.
        stride: accepted but unused; kept for signature compatibility.
        pad: convolution padding.
        activ: activation-module class.
        dropout: if True, apply Dropout2d(0.1) after the convolution.
    """

    def __init__(
        self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
    ):
        super(Decoder, self).__init__()
        self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
        self.dropout = nn.Dropout2d(0.1) if dropout else None

    def forward(self, x, skip=None):
        """Upsample x by 2, optionally fuse with a center-cropped skip."""
        # forward() (not __call__) so nn.Module hooks still run;
        # decoder(x, skip) call sites are unchanged.
        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
        if skip is not None:
            # Crop the skip tensor to x's spatial size before concatenating
            # along the channel axis (spec_utils is a project-local helper).
            skip = spec_utils.crop_center(skip, x)
            x = torch.cat([x, skip], dim=1)
        h = self.conv(x)

        if self.dropout is not None:
            h = self.dropout(h)

        return h
84
-
85
-
86
class ASPPModule(nn.Module):
    """Atrous Spatial Pyramid Pooling over the frequency axis.

    Five parallel branches — a pooled global-context branch, a 1x1
    projection, and three dilated separable 3x3 convolutions — are
    concatenated and squeezed through a 1x1 bottleneck with dropout.

    Args:
        nin: input channels (each branch keeps this width).
        nout: output channels of the bottleneck.
        dilations: dilation rates of the three separable branches.
        activ: activation-module class used throughout.
    """

    def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU):
        super(ASPPModule, self).__init__()
        d1, d2, d3 = dilations
        # Branch 1: collapse the height axis to 1, project, then (in
        # forward) broadcast back to the input resolution.
        self.conv1 = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, None)),
            Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
        )
        # Branch 2: plain 1x1 projection.
        self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
        # Branches 3-5: separable 3x3 convs at increasing dilation.
        self.conv3 = SeperableConv2DBNActiv(nin, nin, 3, 1, d1, d1, activ=activ)
        self.conv4 = SeperableConv2DBNActiv(nin, nin, 3, 1, d2, d2, activ=activ)
        self.conv5 = SeperableConv2DBNActiv(nin, nin, 3, 1, d3, d3, activ=activ)
        # Fuse the five concatenated branches down to nout channels.
        self.bottleneck = nn.Sequential(
            Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ),
            nn.Dropout2d(0.1),
        )

    def forward(self, x):
        size = x.size()[2:]
        pooled = F.interpolate(
            self.conv1(x), size=size, mode="bilinear", align_corners=True
        )
        branches = [pooled, self.conv2(x), self.conv3(x), self.conv4(x), self.conv5(x)]
        return self.bottleneck(torch.cat(branches, dim=1))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIConsultant/MusicGen/audiocraft/utils/samples/__init__.py DELETED
@@ -1,5 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/metrics.py DELETED
@@ -1,69 +0,0 @@
1
- import logging
2
-
3
- import numpy as np
4
- import scipy
5
- import torch
6
- from sklearn.metrics import average_precision_score, roc_auc_score
7
-
8
- logger = logging.getLogger(f'main.{__name__}')
9
-
10
def metrics(targets, outputs, topk=(1, 5)):
    """
    Adapted from https://github.com/hche11/VGGSound/blob/master/utils.py

    Calculate classification statistics: accuracy@k, mAP, mean ROC-AUC,
    and d-prime.

    Args:
        targets: 1d int tensor of ground-truth class ids, shape (dataset_size,).
        outputs: 2d float tensor of raw (pre-softmax) scores,
            shape (dataset_size, classes_num).
        topk: tuple of k values for accuracy@k.

    Returns:
        dict with keys 'accuracy_<k>' (one per k in topk), 'mAP',
        'mROCAUC', and 'dprime'.
    """
    metrics_dict = dict()

    num_cls = outputs.shape[-1]

    # accuracy@k: a sample counts as correct at k if the true class is among
    # the k highest-scoring predictions (topk returns columns in rank order).
    _, preds = torch.topk(outputs, k=max(topk), dim=1)
    correct_for_maxtopk = preds == targets.view(-1, 1).expand_as(preds)
    for k in topk:
        metrics_dict[f'accuracy_{k}'] = float(correct_for_maxtopk[:, :k].sum() / correct_for_maxtopk.shape[0])

    # avg precision, average roc_auc, and dprime
    # One-hot-encode the targets for the per-class (one-vs-rest) metrics.
    targets = torch.nn.functional.one_hot(targets, num_classes=num_cls)

    # ids of the predicted classes (same as softmax)
    targets_pred = torch.softmax(outputs, dim=1)

    targets = targets.numpy()
    targets_pred = targets_pred.numpy()

    # one-vs-rest: one score per class.
    # NOTE(review): avg_p is computed outside the try below, so a ValueError
    # raised here would propagate rather than hit the fallback.
    avg_p = [average_precision_score(targets[:, c], targets_pred[:, c], average=None) for c in range(num_cls)]
    try:
        roc_aucs = [roc_auc_score(targets[:, c], targets_pred[:, c], average=None) for c in range(num_cls)]
    except ValueError:
        # roc_auc_score raises when a class never occurs in targets;
        # fall back to chance-level placeholder values.
        logger.warning('Weird... Some classes never occured in targets. Do not trust the metrics.')
        roc_aucs = np.array([0.5])
        avg_p = np.array([0])

    metrics_dict['mAP'] = np.mean(avg_p)
    metrics_dict['mROCAUC'] = np.mean(roc_aucs)
    # Percent point function (ppf) (inverse of cdf — percentiles).
    # d-prime = sqrt(2) * ppf(AUC): the signal-detection sensitivity index.
    metrics_dict['dprime'] = scipy.stats.norm().ppf(metrics_dict['mROCAUC']) * np.sqrt(2)

    return metrics_dict
56
-
57
-
58
if __name__ == '__main__':
    # Smoke test: 6 samples over 4 classes with hand-made raw scores.
    demo_targets = torch.tensor([3, 3, 1, 2, 1, 0])
    demo_scores = torch.tensor([
        [1.2, 1.3, 1.1, 1.5],
        [1.3, 1.4, 1.0, 1.1],
        [1.5, 1.1, 1.4, 1.3],
        [1.0, 1.2, 1.4, 1.5],
        [1.2, 1.3, 1.1, 1.1],
        [1.2, 1.1, 1.1, 1.1],
    ]).float()
    print(metrics(demo_targets, demo_scores, topk=(1, 3)))
- print(metrics_dict)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/losses_audio/vggishish/logger.py DELETED
@@ -1,87 +0,0 @@
1
- import logging
2
- import os
3
- import time
4
- from shutil import copytree, ignore_patterns
5
-
6
- import torch
7
- from omegaconf import OmegaConf
8
- from torch.utils.tensorboard import SummaryWriter, summary
9
-
10
-
11
class LoggerWithTBoard(SummaryWriter):
    """TensorBoard SummaryWriter that also mirrors messages to stdout and a
    log file, and backs up the run's config (and optionally the code tree)
    into a timestamped experiment directory.

    Side effects on construction: creates the log directory, writes
    cfg.yaml, optionally copies the working tree, and attaches handlers to
    the 'main' logger.
    """

    def __init__(self, cfg):
        # current time stamp and experiment log directory
        self.start_time = time.strftime('%y-%m-%dT%H-%M-%S', time.localtime())
        self.logdir = os.path.join(cfg.logdir, self.start_time)
        # init tboard (SummaryWriter.__init__ creates the directory)
        super().__init__(self.logdir)
        # backup the cfg
        OmegaConf.save(cfg, os.path.join(self.log_dir, 'cfg.yaml'))
        # backup the code state
        if cfg.log_code_state:
            dest_dir = os.path.join(self.logdir, 'code')
            copytree(os.getcwd(), dest_dir, ignore=ignore_patterns(*cfg.patterns_to_ignore))

        # init logger which handles printing and logging mostly same things to the log file
        self.print_logger = logging.getLogger('main')
        self.print_logger.setLevel(logging.INFO)
        msgfmt = '[%(levelname)s] %(asctime)s - %(name)s \n %(message)s'
        datefmt = '%d %b %Y %H:%M:%S'
        formatter = logging.Formatter(msgfmt, datefmt)
        # stdout
        sh = logging.StreamHandler()
        sh.setLevel(logging.DEBUG)
        sh.setFormatter(formatter)
        self.print_logger.addHandler(sh)
        # log file (must come after super().__init__ so the directory exists)
        fh = logging.FileHandler(os.path.join(self.log_dir, 'log.txt'))
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        self.print_logger.addHandler(fh)

        self.print_logger.info(f'Saving logs and checkpoints @ {self.logdir}')

    def log_param_num(self, model):
        """Log and return the number of trainable parameters of `model`."""
        param_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
        self.print_logger.info(f'The number of parameters: {param_num/1e+6:.3f} mil')
        self.add_scalar('num_params', param_num, 0)
        return param_num

    def log_iter_loss(self, loss, iter, phase):
        # Per-iteration loss scalar.
        # NOTE(review): parameter name `iter` shadows the builtin; kept for
        # caller compatibility.
        self.add_scalar(f'{phase}/loss_iter', loss, iter)

    def log_epoch_loss(self, loss, epoch, phase):
        """Log the epoch-level loss to TensorBoard and the text log."""
        self.add_scalar(f'{phase}/loss', loss, epoch)
        self.print_logger.info(f'{phase} ({epoch}): loss {loss:.3f};')

    def log_epoch_metrics(self, metrics_dict, epoch, phase):
        """Log each metric as a scalar, then a rounded summary line."""
        for metric, val in metrics_dict.items():
            self.add_scalar(f'{phase}/{metric}', val, epoch)
        metrics_dict = {k: round(v, 4) for k, v in metrics_dict.items()}
        self.print_logger.info(f'{phase} ({epoch}) metrics: {metrics_dict};')

    def log_test_metrics(self, metrics_dict, hparams_dict, best_epoch):
        """Record test metrics together with hyperparameters (hparams view).

        Hyperparameters whose type TensorBoard cannot serialize are dropped.
        """
        allowed_types = (int, float, str, bool, torch.Tensor)
        hparams_dict = {k: v for k, v in hparams_dict.items() if isinstance(v, allowed_types)}
        metrics_dict = {f'test/{k}': round(v, 4) for k, v in metrics_dict.items()}
        # Build the three protobuf summaries the hparams plugin expects and
        # write them through the underlying file writer.
        exp, ssi, sei = summary.hparams(hparams_dict, metrics_dict)
        self.file_writer.add_summary(exp)
        self.file_writer.add_summary(ssi)
        self.file_writer.add_summary(sei)
        for k, v in metrics_dict.items():
            self.add_scalar(k, v, best_epoch)
        self.print_logger.info(f'test ({best_epoch}) metrics: {metrics_dict};')

    def log_best_model(self, model, loss, epoch, optimizer, metrics_dict):
        """Save a checkpoint (model + optimizer + stats) as the current best.

        The path is remembered on self.best_model_path for later reloading.
        """
        model_name = model.__class__.__name__
        self.best_model_path = os.path.join(self.logdir, f'{model_name}-{self.start_time}.pt')
        checkpoint = {
            'loss': loss,
            'metrics': metrics_dict,
            'epoch': epoch,
            'optimizer': optimizer.state_dict(),
            'model': model.state_dict(),
        }
        torch.save(checkpoint, self.best_model_path)
        self.print_logger.info(f'Saved model in {self.best_model_path}')
- self.print_logger.info(f'Saved model in {self.best_model_path}')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIZero2HeroBootcamp/StaticHTML5Playcanvas/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: StaticHTML5Playcanvas
3
- emoji: 👀
4
- colorFrom: gray
5
- colorTo: red
6
- sdk: static
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/midas/midas_net_custom.py DELETED
@@ -1,128 +0,0 @@
1
- """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
2
- This file contains code that is adapted from
3
- https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
4
- """
5
- import torch
6
- import torch.nn as nn
7
-
8
- from .base_model import BaseModel
9
- from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
10
-
11
-
12
- class MidasNet_small(BaseModel):
13
- """Network for monocular depth estimation.
14
- """
15
-
16
- def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
17
- blocks={'expand': True}):
18
- """Init.
19
-
20
- Args:
21
- path (str, optional): Path to saved model. Defaults to None.
22
- features (int, optional): Number of features. Defaults to 256.
23
- backbone (str, optional): Backbone network for encoder. Defaults to resnet50
24
- """
25
- print("Loading weights: ", path)
26
-
27
- super(MidasNet_small, self).__init__()
28
-
29
- use_pretrained = False if path else True
30
-
31
- self.channels_last = channels_last
32
- self.blocks = blocks
33
- self.backbone = backbone
34
-
35
- self.groups = 1
36
-
37
- features1=features
38
- features2=features
39
- features3=features
40
- features4=features
41
- self.expand = False
42
- if "expand" in self.blocks and self.blocks['expand'] == True:
43
- self.expand = True
44
- features1=features
45
- features2=features*2
46
- features3=features*4
47
- features4=features*8
48
-
49
- self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)
50
-
51
- self.scratch.activation = nn.ReLU(False)
52
-
53
- self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
54
- self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
55
- self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
56
- self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)
57
-
58
-
59
- self.scratch.output_conv = nn.Sequential(
60
- nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups),
61
- Interpolate(scale_factor=2, mode="bilinear"),
62
- nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1),
63
- self.scratch.activation,
64
- nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
65
- nn.ReLU(True) if non_negative else nn.Identity(),
66
- nn.Identity(),
67
- )
68
-
69
- if path:
70
- self.load(path)
71
-
72
-
73
- def forward(self, x):
74
- """Forward pass.
75
-
76
- Args:
77
- x (tensor): input data (image)
78
-
79
- Returns:
80
- tensor: depth
81
- """
82
- if self.channels_last==True:
83
- print("self.channels_last = ", self.channels_last)
84
- x.contiguous(memory_format=torch.channels_last)
85
-
86
-
87
- layer_1 = self.pretrained.layer1(x)
88
- layer_2 = self.pretrained.layer2(layer_1)
89
- layer_3 = self.pretrained.layer3(layer_2)
90
- layer_4 = self.pretrained.layer4(layer_3)
91
-
92
- layer_1_rn = self.scratch.layer1_rn(layer_1)
93
- layer_2_rn = self.scratch.layer2_rn(layer_2)
94
- layer_3_rn = self.scratch.layer3_rn(layer_3)
95
- layer_4_rn = self.scratch.layer4_rn(layer_4)
96
-
97
-
98
- path_4 = self.scratch.refinenet4(layer_4_rn)
99
- path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
100
- path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
101
- path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
102
-
103
- out = self.scratch.output_conv(path_1)
104
-
105
- return torch.squeeze(out, dim=1)
106
-
107
-
108
-
109
- def fuse_model(m):
110
- prev_previous_type = nn.Identity()
111
- prev_previous_name = ''
112
- previous_type = nn.Identity()
113
- previous_name = ''
114
- for name, module in m.named_modules():
115
- if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
116
- # print("FUSED ", prev_previous_name, previous_name, name)
117
- torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
118
- elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
119
- # print("FUSED ", prev_previous_name, previous_name)
120
- torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
121
- # elif previous_type == nn.Conv2d and type(module) == nn.ReLU:
122
- # print("FUSED ", previous_name, name)
123
- # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)
124
-
125
- prev_previous_type = previous_type
126
- prev_previous_name = previous_name
127
- previous_type = type(module)
128
- previous_name = name
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/checkbox/Checkbox.d.ts DELETED
@@ -1,2 +0,0 @@
1
- import Checkbox from '../../../plugins/checkbox';
2
- export default Checkbox;
 
 
 
spaces/AkitoP/umamusume_bert_vits2/utils.py DELETED
@@ -1,356 +0,0 @@
1
- import os
2
- import glob
3
- import argparse
4
- import logging
5
- import json
6
- import subprocess
7
- import numpy as np
8
- from scipy.io.wavfile import read
9
- import torch
10
-
11
- MATPLOTLIB_FLAG = False
12
-
13
- logger = logging.getLogger(__name__)
14
-
15
-
16
- def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False):
17
- assert os.path.isfile(checkpoint_path)
18
- checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
19
- iteration = checkpoint_dict["iteration"]
20
- learning_rate = checkpoint_dict["learning_rate"]
21
- if (
22
- optimizer is not None
23
- and not skip_optimizer
24
- and checkpoint_dict["optimizer"] is not None
25
- ):
26
- optimizer.load_state_dict(checkpoint_dict["optimizer"])
27
- elif optimizer is None and not skip_optimizer:
28
- # else: Disable this line if Infer and resume checkpoint,then enable the line upper
29
- new_opt_dict = optimizer.state_dict()
30
- new_opt_dict_params = new_opt_dict["param_groups"][0]["params"]
31
- new_opt_dict["param_groups"] = checkpoint_dict["optimizer"]["param_groups"]
32
- new_opt_dict["param_groups"][0]["params"] = new_opt_dict_params
33
- optimizer.load_state_dict(new_opt_dict)
34
-
35
- saved_state_dict = checkpoint_dict["model"]
36
- if hasattr(model, "module"):
37
- state_dict = model.module.state_dict()
38
- else:
39
- state_dict = model.state_dict()
40
-
41
- new_state_dict = {}
42
- for k, v in state_dict.items():
43
- try:
44
- # assert "emb_g" not in k
45
- new_state_dict[k] = saved_state_dict[k]
46
- assert saved_state_dict[k].shape == v.shape, (
47
- saved_state_dict[k].shape,
48
- v.shape,
49
- )
50
- except:
51
- # For upgrading from the old version
52
- if "ja_bert_proj" in k:
53
- v = torch.zeros_like(v)
54
- logger.warn(
55
- f"Seems you are using the old version of the model, the {k} is automatically set to zero for backward compatibility"
56
- )
57
- else:
58
- logger.error(f"{k} is not in the checkpoint")
59
-
60
- new_state_dict[k] = v
61
-
62
- if hasattr(model, "module"):
63
- model.module.load_state_dict(new_state_dict, strict=False)
64
- else:
65
- model.load_state_dict(new_state_dict, strict=False)
66
-
67
- logger.info(
68
- "Loaded checkpoint '{}' (iteration {})".format(checkpoint_path, iteration)
69
- )
70
-
71
- return model, optimizer, learning_rate, iteration
72
-
73
-
74
- def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
75
- logger.info(
76
- "Saving model and optimizer state at iteration {} to {}".format(
77
- iteration, checkpoint_path
78
- )
79
- )
80
- if hasattr(model, "module"):
81
- state_dict = model.module.state_dict()
82
- else:
83
- state_dict = model.state_dict()
84
- torch.save(
85
- {
86
- "model": state_dict,
87
- "iteration": iteration,
88
- "optimizer": optimizer.state_dict(),
89
- "learning_rate": learning_rate,
90
- },
91
- checkpoint_path,
92
- )
93
-
94
-
95
- def summarize(
96
- writer,
97
- global_step,
98
- scalars={},
99
- histograms={},
100
- images={},
101
- audios={},
102
- audio_sampling_rate=22050,
103
- ):
104
- for k, v in scalars.items():
105
- writer.add_scalar(k, v, global_step)
106
- for k, v in histograms.items():
107
- writer.add_histogram(k, v, global_step)
108
- for k, v in images.items():
109
- writer.add_image(k, v, global_step, dataformats="HWC")
110
- for k, v in audios.items():
111
- writer.add_audio(k, v, global_step, audio_sampling_rate)
112
-
113
-
114
- def latest_checkpoint_path(dir_path, regex="G_*.pth"):
115
- f_list = glob.glob(os.path.join(dir_path, regex))
116
- f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
117
- x = f_list[-1]
118
- return x
119
-
120
-
121
- def plot_spectrogram_to_numpy(spectrogram):
122
- global MATPLOTLIB_FLAG
123
- if not MATPLOTLIB_FLAG:
124
- import matplotlib
125
-
126
- matplotlib.use("Agg")
127
- MATPLOTLIB_FLAG = True
128
- mpl_logger = logging.getLogger("matplotlib")
129
- mpl_logger.setLevel(logging.WARNING)
130
- import matplotlib.pylab as plt
131
- import numpy as np
132
-
133
- fig, ax = plt.subplots(figsize=(10, 2))
134
- im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
135
- plt.colorbar(im, ax=ax)
136
- plt.xlabel("Frames")
137
- plt.ylabel("Channels")
138
- plt.tight_layout()
139
-
140
- fig.canvas.draw()
141
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
142
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
143
- plt.close()
144
- return data
145
-
146
-
147
- def plot_alignment_to_numpy(alignment, info=None):
148
- global MATPLOTLIB_FLAG
149
- if not MATPLOTLIB_FLAG:
150
- import matplotlib
151
-
152
- matplotlib.use("Agg")
153
- MATPLOTLIB_FLAG = True
154
- mpl_logger = logging.getLogger("matplotlib")
155
- mpl_logger.setLevel(logging.WARNING)
156
- import matplotlib.pylab as plt
157
- import numpy as np
158
-
159
- fig, ax = plt.subplots(figsize=(6, 4))
160
- im = ax.imshow(
161
- alignment.transpose(), aspect="auto", origin="lower", interpolation="none"
162
- )
163
- fig.colorbar(im, ax=ax)
164
- xlabel = "Decoder timestep"
165
- if info is not None:
166
- xlabel += "\n\n" + info
167
- plt.xlabel(xlabel)
168
- plt.ylabel("Encoder timestep")
169
- plt.tight_layout()
170
-
171
- fig.canvas.draw()
172
- data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
173
- data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
174
- plt.close()
175
- return data
176
-
177
-
178
- def load_wav_to_torch(full_path):
179
- sampling_rate, data = read(full_path)
180
- return torch.FloatTensor(data.astype(np.float32)), sampling_rate
181
-
182
-
183
- def load_filepaths_and_text(filename, split="|"):
184
- with open(filename, encoding="utf-8") as f:
185
- filepaths_and_text = [line.strip().split(split) for line in f]
186
- return filepaths_and_text
187
-
188
-
189
- def get_hparams(init=True):
190
- parser = argparse.ArgumentParser()
191
- parser.add_argument(
192
- "-c",
193
- "--config",
194
- type=str,
195
- default="./configs/base.json",
196
- help="JSON file for configuration",
197
- )
198
- parser.add_argument("-m", "--model", type=str, required=True, help="Model name")
199
-
200
- args = parser.parse_args()
201
- model_dir = os.path.join("./logs", args.model)
202
-
203
- if not os.path.exists(model_dir):
204
- os.makedirs(model_dir)
205
-
206
- config_path = args.config
207
- config_save_path = os.path.join(model_dir, "config.json")
208
- if init:
209
- with open(config_path, "r", encoding="utf-8") as f:
210
- data = f.read()
211
- with open(config_save_path, "w", encoding="utf-8") as f:
212
- f.write(data)
213
- else:
214
- with open(config_save_path, "r", vencoding="utf-8") as f:
215
- data = f.read()
216
- config = json.loads(data)
217
- hparams = HParams(**config)
218
- hparams.model_dir = model_dir
219
- return hparams
220
-
221
-
222
- def clean_checkpoints(path_to_models="logs/44k/", n_ckpts_to_keep=2, sort_by_time=True):
223
- """Freeing up space by deleting saved ckpts
224
-
225
- Arguments:
226
- path_to_models -- Path to the model directory
227
- n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth
228
- sort_by_time -- True -> chronologically delete ckpts
229
- False -> lexicographically delete ckpts
230
- """
231
- import re
232
-
233
- ckpts_files = [
234
- f
235
- for f in os.listdir(path_to_models)
236
- if os.path.isfile(os.path.join(path_to_models, f))
237
- ]
238
-
239
- def name_key(_f):
240
- return int(re.compile("._(\\d+)\\.pth").match(_f).group(1))
241
-
242
- def time_key(_f):
243
- return os.path.getmtime(os.path.join(path_to_models, _f))
244
-
245
- sort_key = time_key if sort_by_time else name_key
246
-
247
- def x_sorted(_x):
248
- return sorted(
249
- [f for f in ckpts_files if f.startswith(_x) and not f.endswith("_0.pth")],
250
- key=sort_key,
251
- )
252
-
253
- to_del = [
254
- os.path.join(path_to_models, fn)
255
- for fn in (x_sorted("G")[:-n_ckpts_to_keep] + x_sorted("D")[:-n_ckpts_to_keep])
256
- ]
257
-
258
- def del_info(fn):
259
- return logger.info(f".. Free up space by deleting ckpt {fn}")
260
-
261
- def del_routine(x):
262
- return [os.remove(x), del_info(x)]
263
-
264
- [del_routine(fn) for fn in to_del]
265
-
266
-
267
- def get_hparams_from_dir(model_dir):
268
- config_save_path = os.path.join(model_dir, "config.json")
269
- with open(config_save_path, "r", encoding="utf-8") as f:
270
- data = f.read()
271
- config = json.loads(data)
272
-
273
- hparams = HParams(**config)
274
- hparams.model_dir = model_dir
275
- return hparams
276
-
277
-
278
- def get_hparams_from_file(config_path):
279
- with open(config_path, "r", encoding="utf-8") as f:
280
- data = f.read()
281
- config = json.loads(data)
282
-
283
- hparams = HParams(**config)
284
- return hparams
285
-
286
-
287
- def check_git_hash(model_dir):
288
- source_dir = os.path.dirname(os.path.realpath(__file__))
289
- if not os.path.exists(os.path.join(source_dir, ".git")):
290
- logger.warn(
291
- "{} is not a git repository, therefore hash value comparison will be ignored.".format(
292
- source_dir
293
- )
294
- )
295
- return
296
-
297
- cur_hash = subprocess.getoutput("git rev-parse HEAD")
298
-
299
- path = os.path.join(model_dir, "githash")
300
- if os.path.exists(path):
301
- saved_hash = open(path).read()
302
- if saved_hash != cur_hash:
303
- logger.warn(
304
- "git hash values are different. {}(saved) != {}(current)".format(
305
- saved_hash[:8], cur_hash[:8]
306
- )
307
- )
308
- else:
309
- open(path, "w").write(cur_hash)
310
-
311
-
312
- def get_logger(model_dir, filename="train.log"):
313
- global logger
314
- logger = logging.getLogger(os.path.basename(model_dir))
315
- logger.setLevel(logging.DEBUG)
316
-
317
- formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
318
- if not os.path.exists(model_dir):
319
- os.makedirs(model_dir)
320
- h = logging.FileHandler(os.path.join(model_dir, filename))
321
- h.setLevel(logging.DEBUG)
322
- h.setFormatter(formatter)
323
- logger.addHandler(h)
324
- return logger
325
-
326
-
327
- class HParams:
328
- def __init__(self, **kwargs):
329
- for k, v in kwargs.items():
330
- if type(v) == dict:
331
- v = HParams(**v)
332
- self[k] = v
333
-
334
- def keys(self):
335
- return self.__dict__.keys()
336
-
337
- def items(self):
338
- return self.__dict__.items()
339
-
340
- def values(self):
341
- return self.__dict__.values()
342
-
343
- def __len__(self):
344
- return len(self.__dict__)
345
-
346
- def __getitem__(self, key):
347
- return getattr(self, key)
348
-
349
- def __setitem__(self, key, value):
350
- return setattr(self, key, value)
351
-
352
- def __contains__(self, key):
353
- return key in self.__dict__
354
-
355
- def __repr__(self):
356
- return self.__dict__.__repr__()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/__init__.py DELETED
File without changes
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/CODE_OF_CONDUCT.md DELETED
@@ -1,130 +0,0 @@
1
-
2
- # Contributor Covenant Code of Conduct
3
-
4
- ## Our Pledge
5
-
6
- We as members, contributors, and leaders pledge to make participation in our
7
- community a harassment-free experience for everyone, regardless of age, body
8
- size, visible or invisible disability, ethnicity, sex characteristics, gender
9
- identity and expression, level of experience, education, socio-economic status,
10
- nationality, personal appearance, race, religion, or sexual identity
11
- and orientation.
12
-
13
- We pledge to act and interact in ways that contribute to an open, welcoming,
14
- diverse, inclusive, and healthy community.
15
-
16
- ## Our Standards
17
-
18
- Examples of behavior that contributes to a positive environment for our
19
- community include:
20
-
21
- * Demonstrating empathy and kindness toward other people
22
- * Being respectful of differing opinions, viewpoints, and experiences
23
- * Giving and gracefully accepting constructive feedback
24
- * Accepting responsibility and apologizing to those affected by our mistakes,
25
- and learning from the experience
26
- * Focusing on what is best not just for us as individuals, but for the
27
- overall diffusers community
28
-
29
- Examples of unacceptable behavior include:
30
-
31
- * The use of sexualized language or imagery, and sexual attention or
32
- advances of any kind
33
- * Trolling, insulting or derogatory comments, and personal or political attacks
34
- * Public or private harassment
35
- * Publishing others' private information, such as a physical or email
36
- address, without their explicit permission
37
- * Spamming issues or PRs with links to projects unrelated to this library
38
- * Other conduct which could reasonably be considered inappropriate in a
39
- professional setting
40
-
41
- ## Enforcement Responsibilities
42
-
43
- Community leaders are responsible for clarifying and enforcing our standards of
44
- acceptable behavior and will take appropriate and fair corrective action in
45
- response to any behavior that they deem inappropriate, threatening, offensive,
46
- or harmful.
47
-
48
- Community leaders have the right and responsibility to remove, edit, or reject
49
- comments, commits, code, wiki edits, issues, and other contributions that are
50
- not aligned to this Code of Conduct, and will communicate reasons for moderation
51
- decisions when appropriate.
52
-
53
- ## Scope
54
-
55
- This Code of Conduct applies within all community spaces, and also applies when
56
- an individual is officially representing the community in public spaces.
57
- Examples of representing our community include using an official e-mail address,
58
- posting via an official social media account, or acting as an appointed
59
- representative at an online or offline event.
60
-
61
- ## Enforcement
62
-
63
- Instances of abusive, harassing, or otherwise unacceptable behavior may be
64
- reported to the community leaders responsible for enforcement at
65
66
- All complaints will be reviewed and investigated promptly and fairly.
67
-
68
- All community leaders are obligated to respect the privacy and security of the
69
- reporter of any incident.
70
-
71
- ## Enforcement Guidelines
72
-
73
- Community leaders will follow these Community Impact Guidelines in determining
74
- the consequences for any action they deem in violation of this Code of Conduct:
75
-
76
- ### 1. Correction
77
-
78
- **Community Impact**: Use of inappropriate language or other behavior deemed
79
- unprofessional or unwelcome in the community.
80
-
81
- **Consequence**: A private, written warning from community leaders, providing
82
- clarity around the nature of the violation and an explanation of why the
83
- behavior was inappropriate. A public apology may be requested.
84
-
85
- ### 2. Warning
86
-
87
- **Community Impact**: A violation through a single incident or series
88
- of actions.
89
-
90
- **Consequence**: A warning with consequences for continued behavior. No
91
- interaction with the people involved, including unsolicited interaction with
92
- those enforcing the Code of Conduct, for a specified period of time. This
93
- includes avoiding interactions in community spaces as well as external channels
94
- like social media. Violating these terms may lead to a temporary or
95
- permanent ban.
96
-
97
- ### 3. Temporary Ban
98
-
99
- **Community Impact**: A serious violation of community standards, including
100
- sustained inappropriate behavior.
101
-
102
- **Consequence**: A temporary ban from any sort of interaction or public
103
- communication with the community for a specified period of time. No public or
104
- private interaction with the people involved, including unsolicited interaction
105
- with those enforcing the Code of Conduct, is allowed during this period.
106
- Violating these terms may lead to a permanent ban.
107
-
108
- ### 4. Permanent Ban
109
-
110
- **Community Impact**: Demonstrating a pattern of violation of community
111
- standards, including sustained inappropriate behavior, harassment of an
112
- individual, or aggression toward or disparagement of classes of individuals.
113
-
114
- **Consequence**: A permanent ban from any sort of public interaction within
115
- the community.
116
-
117
- ## Attribution
118
-
119
- This Code of Conduct is adapted from the [Contributor Covenant][homepage],
120
- version 2.0, available at
121
- https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
122
-
123
- Community Impact Guidelines were inspired by [Mozilla's code of conduct
124
- enforcement ladder](https://github.com/mozilla/diversity).
125
-
126
- [homepage]: https://www.contributor-covenant.org
127
-
128
- For answers to common questions about this code of conduct, see the FAQ at
129
- https://www.contributor-covenant.org/faq. Translations are available at
130
- https://www.contributor-covenant.org/translations.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/mulit_token_textual_inversion/textual_inversion.py DELETED
@@ -1,927 +0,0 @@
1
- #!/usr/bin/env python
2
- # coding=utf-8
3
- # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
4
- #
5
- # Licensed under the Apache License, Version 2.0 (the "License");
6
- # you may not use this file except in compliance with the License.
7
- # You may obtain a copy of the License at
8
- #
9
- # http://www.apache.org/licenses/LICENSE-2.0
10
- #
11
- # Unless required by applicable law or agreed to in writing, software
12
- # distributed under the License is distributed on an "AS IS" BASIS,
13
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- # See the License for the specific language governing permissions and
15
-
16
- import argparse
17
- import logging
18
- import math
19
- import os
20
- import random
21
- from pathlib import Path
22
-
23
- import numpy as np
24
- import PIL
25
- import torch
26
- import torch.nn.functional as F
27
- import torch.utils.checkpoint
28
- import transformers
29
- from accelerate import Accelerator
30
- from accelerate.logging import get_logger
31
- from accelerate.utils import ProjectConfiguration, set_seed
32
- from huggingface_hub import create_repo, upload_folder
33
- from multi_token_clip import MultiTokenCLIPTokenizer
34
-
35
- # TODO: remove and import from diffusers.utils when the new version of diffusers is released
36
- from packaging import version
37
- from PIL import Image
38
- from torch.utils.data import Dataset
39
- from torchvision import transforms
40
- from tqdm.auto import tqdm
41
- from transformers import CLIPTextModel
42
-
43
- import diffusers
44
- from diffusers import (
45
- AutoencoderKL,
46
- DDPMScheduler,
47
- DiffusionPipeline,
48
- DPMSolverMultistepScheduler,
49
- StableDiffusionPipeline,
50
- UNet2DConditionModel,
51
- )
52
- from diffusers.optimization import get_scheduler
53
- from diffusers.utils import check_min_version, is_wandb_available
54
- from diffusers.utils.import_utils import is_xformers_available
55
-
56
-
57
- if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
58
- PIL_INTERPOLATION = {
59
- "linear": PIL.Image.Resampling.BILINEAR,
60
- "bilinear": PIL.Image.Resampling.BILINEAR,
61
- "bicubic": PIL.Image.Resampling.BICUBIC,
62
- "lanczos": PIL.Image.Resampling.LANCZOS,
63
- "nearest": PIL.Image.Resampling.NEAREST,
64
- }
65
- else:
66
- PIL_INTERPOLATION = {
67
- "linear": PIL.Image.LINEAR,
68
- "bilinear": PIL.Image.BILINEAR,
69
- "bicubic": PIL.Image.BICUBIC,
70
- "lanczos": PIL.Image.LANCZOS,
71
- "nearest": PIL.Image.NEAREST,
72
- }
73
- # ------------------------------------------------------------------------------
74
-
75
-
76
- # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
77
- check_min_version("0.14.0.dev0")
78
-
79
- logger = get_logger(__name__)
80
-
81
-
82
- def add_tokens(tokenizer, text_encoder, placeholder_token, num_vec_per_token=1, initializer_token=None):
83
- """
84
- Add tokens to the tokenizer and set the initial value of token embeddings
85
- """
86
- tokenizer.add_placeholder_tokens(placeholder_token, num_vec_per_token=num_vec_per_token)
87
- text_encoder.resize_token_embeddings(len(tokenizer))
88
- token_embeds = text_encoder.get_input_embeddings().weight.data
89
- placeholder_token_ids = tokenizer.encode(placeholder_token, add_special_tokens=False)
90
- if initializer_token:
91
- token_ids = tokenizer.encode(initializer_token, add_special_tokens=False)
92
- for i, placeholder_token_id in enumerate(placeholder_token_ids):
93
- token_embeds[placeholder_token_id] = token_embeds[token_ids[i * len(token_ids) // num_vec_per_token]]
94
- else:
95
- for i, placeholder_token_id in enumerate(placeholder_token_ids):
96
- token_embeds[placeholder_token_id] = torch.randn_like(token_embeds[placeholder_token_id])
97
- return placeholder_token
98
-
99
-
100
- def save_progress(tokenizer, text_encoder, accelerator, save_path):
101
- for placeholder_token in tokenizer.token_map:
102
- placeholder_token_ids = tokenizer.encode(placeholder_token, add_special_tokens=False)
103
- learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_ids]
104
- if len(placeholder_token_ids) == 1:
105
- learned_embeds = learned_embeds[None]
106
- learned_embeds_dict = {placeholder_token: learned_embeds.detach().cpu()}
107
- torch.save(learned_embeds_dict, save_path)
108
-
109
-
110
- def load_multitoken_tokenizer(tokenizer, text_encoder, learned_embeds_dict):
111
- for placeholder_token in learned_embeds_dict:
112
- placeholder_embeds = learned_embeds_dict[placeholder_token]
113
- num_vec_per_token = placeholder_embeds.shape[0]
114
- placeholder_embeds = placeholder_embeds.to(dtype=text_encoder.dtype)
115
- add_tokens(tokenizer, text_encoder, placeholder_token, num_vec_per_token=num_vec_per_token)
116
- placeholder_token_ids = tokenizer.encode(placeholder_token, add_special_tokens=False)
117
- token_embeds = text_encoder.get_input_embeddings().weight.data
118
- for i, placeholder_token_id in enumerate(placeholder_token_ids):
119
- token_embeds[placeholder_token_id] = placeholder_embeds[i]
120
-
121
-
122
- def load_multitoken_tokenizer_from_automatic(tokenizer, text_encoder, automatic_dict, placeholder_token):
123
- """
124
- Automatic1111's tokens have format
125
- {'string_to_token': {'*': 265}, 'string_to_param': {'*': tensor([[ 0.0833, 0.0030, 0.0057, ..., -0.0264, -0.0616, -0.0529],
126
- [ 0.0058, -0.0190, -0.0584, ..., -0.0025, -0.0945, -0.0490],
127
- [ 0.0916, 0.0025, 0.0365, ..., -0.0685, -0.0124, 0.0728],
128
- [ 0.0812, -0.0199, -0.0100, ..., -0.0581, -0.0780, 0.0254]],
129
- requires_grad=True)}, 'name': 'FloralMarble-400', 'step': 399, 'sd_checkpoint': '4bdfc29c', 'sd_checkpoint_name': 'SD2.1-768'}
130
- """
131
- learned_embeds_dict = {}
132
- learned_embeds_dict[placeholder_token] = automatic_dict["string_to_param"]["*"]
133
- load_multitoken_tokenizer(tokenizer, text_encoder, learned_embeds_dict)
134
-
135
-
136
- def get_mask(tokenizer, accelerator):
137
- # Get the mask of the weights that won't change
138
- mask = torch.ones(len(tokenizer)).to(accelerator.device, dtype=torch.bool)
139
- for placeholder_token in tokenizer.token_map:
140
- placeholder_token_ids = tokenizer.encode(placeholder_token, add_special_tokens=False)
141
- for i in range(len(placeholder_token_ids)):
142
- mask = mask & (torch.arange(len(tokenizer)) != placeholder_token_ids[i]).to(accelerator.device)
143
- return mask
144
-
145
-
146
- def parse_args():
147
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
148
- parser.add_argument(
149
- "--progressive_tokens_max_steps",
150
- type=int,
151
- default=2000,
152
- help="The number of steps until all tokens will be used.",
153
- )
154
- parser.add_argument(
155
- "--progressive_tokens",
156
- action="store_true",
157
- help="Progressively train the tokens. For example, first train for 1 token, then 2 tokens and so on.",
158
- )
159
- parser.add_argument("--vector_shuffle", action="store_true", help="Shuffling tokens durint training")
160
- parser.add_argument(
161
- "--num_vec_per_token",
162
- type=int,
163
- default=1,
164
- help=(
165
- "The number of vectors used to represent the placeholder token. The higher the number, the better the"
166
- " result at the cost of editability. This can be fixed by prompt editing."
167
- ),
168
- )
169
- parser.add_argument(
170
- "--save_steps",
171
- type=int,
172
- default=500,
173
- help="Save learned_embeds.bin every X updates steps.",
174
- )
175
- parser.add_argument(
176
- "--only_save_embeds",
177
- action="store_true",
178
- default=False,
179
- help="Save only the embeddings for the new concept.",
180
- )
181
- parser.add_argument(
182
- "--pretrained_model_name_or_path",
183
- type=str,
184
- default=None,
185
- required=True,
186
- help="Path to pretrained model or model identifier from huggingface.co/models.",
187
- )
188
- parser.add_argument(
189
- "--revision",
190
- type=str,
191
- default=None,
192
- required=False,
193
- help="Revision of pretrained model identifier from huggingface.co/models.",
194
- )
195
- parser.add_argument(
196
- "--tokenizer_name",
197
- type=str,
198
- default=None,
199
- help="Pretrained tokenizer name or path if not the same as model_name",
200
- )
201
- parser.add_argument(
202
- "--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
203
- )
204
- parser.add_argument(
205
- "--placeholder_token",
206
- type=str,
207
- default=None,
208
- required=True,
209
- help="A token to use as a placeholder for the concept.",
210
- )
211
- parser.add_argument(
212
- "--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
213
- )
214
- parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
215
- parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
216
- parser.add_argument(
217
- "--output_dir",
218
- type=str,
219
- default="text-inversion-model",
220
- help="The output directory where the model predictions and checkpoints will be written.",
221
- )
222
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
223
- parser.add_argument(
224
- "--resolution",
225
- type=int,
226
- default=512,
227
- help=(
228
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
229
- " resolution"
230
- ),
231
- )
232
- parser.add_argument(
233
- "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution."
234
- )
235
- parser.add_argument(
236
- "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
237
- )
238
- parser.add_argument("--num_train_epochs", type=int, default=100)
239
- parser.add_argument(
240
- "--max_train_steps",
241
- type=int,
242
- default=5000,
243
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
244
- )
245
- parser.add_argument(
246
- "--gradient_accumulation_steps",
247
- type=int,
248
- default=1,
249
- help="Number of updates steps to accumulate before performing a backward/update pass.",
250
- )
251
- parser.add_argument(
252
- "--gradient_checkpointing",
253
- action="store_true",
254
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
255
- )
256
- parser.add_argument(
257
- "--learning_rate",
258
- type=float,
259
- default=1e-4,
260
- help="Initial learning rate (after the potential warmup period) to use.",
261
- )
262
- parser.add_argument(
263
- "--scale_lr",
264
- action="store_true",
265
- default=False,
266
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
267
- )
268
- parser.add_argument(
269
- "--lr_scheduler",
270
- type=str,
271
- default="constant",
272
- help=(
273
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
274
- ' "constant", "constant_with_warmup"]'
275
- ),
276
- )
277
- parser.add_argument(
278
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
279
- )
280
- parser.add_argument(
281
- "--dataloader_num_workers",
282
- type=int,
283
- default=0,
284
- help=(
285
- "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
286
- ),
287
- )
288
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
289
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
290
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
291
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
292
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
293
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
294
- parser.add_argument(
295
- "--hub_model_id",
296
- type=str,
297
- default=None,
298
- help="The name of the repository to keep in sync with the local `output_dir`.",
299
- )
300
- parser.add_argument(
301
- "--logging_dir",
302
- type=str,
303
- default="logs",
304
- help=(
305
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
306
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
307
- ),
308
- )
309
- parser.add_argument(
310
- "--mixed_precision",
311
- type=str,
312
- default="no",
313
- choices=["no", "fp16", "bf16"],
314
- help=(
315
- "Whether to use mixed precision. Choose"
316
- "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
317
- "and an Nvidia Ampere GPU."
318
- ),
319
- )
320
- parser.add_argument(
321
- "--allow_tf32",
322
- action="store_true",
323
- help=(
324
- "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
325
- " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
326
- ),
327
- )
328
- parser.add_argument(
329
- "--report_to",
330
- type=str,
331
- default="tensorboard",
332
- help=(
333
- 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
334
- ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
335
- ),
336
- )
337
- parser.add_argument(
338
- "--validation_prompt",
339
- type=str,
340
- default=None,
341
- help="A prompt that is used during validation to verify that the model is learning.",
342
- )
343
- parser.add_argument(
344
- "--num_validation_images",
345
- type=int,
346
- default=4,
347
- help="Number of images that should be generated during validation with `validation_prompt`.",
348
- )
349
- parser.add_argument(
350
- "--validation_epochs",
351
- type=int,
352
- default=50,
353
- help=(
354
- "Run validation every X epochs. Validation consists of running the prompt"
355
- " `args.validation_prompt` multiple times: `args.num_validation_images`"
356
- " and logging the images."
357
- ),
358
- )
359
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
360
- parser.add_argument(
361
- "--checkpointing_steps",
362
- type=int,
363
- default=500,
364
- help=(
365
- "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
366
- " training using `--resume_from_checkpoint`."
367
- ),
368
- )
369
- parser.add_argument(
370
- "--checkpoints_total_limit",
371
- type=int,
372
- default=None,
373
- help=(
374
- "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
375
- " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
376
- " for more docs"
377
- ),
378
- )
379
- parser.add_argument(
380
- "--resume_from_checkpoint",
381
- type=str,
382
- default=None,
383
- help=(
384
- "Whether training should be resumed from a previous checkpoint. Use a path saved by"
385
- ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
386
- ),
387
- )
388
- parser.add_argument(
389
- "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
390
- )
391
-
392
- args = parser.parse_args()
393
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
394
- if env_local_rank != -1 and env_local_rank != args.local_rank:
395
- args.local_rank = env_local_rank
396
-
397
- if args.train_data_dir is None:
398
- raise ValueError("You must specify a train data directory.")
399
-
400
- return args
401
-
402
-
403
- imagenet_templates_small = [
404
- "a photo of a {}",
405
- "a rendering of a {}",
406
- "a cropped photo of the {}",
407
- "the photo of a {}",
408
- "a photo of a clean {}",
409
- "a photo of a dirty {}",
410
- "a dark photo of the {}",
411
- "a photo of my {}",
412
- "a photo of the cool {}",
413
- "a close-up photo of a {}",
414
- "a bright photo of the {}",
415
- "a cropped photo of a {}",
416
- "a photo of the {}",
417
- "a good photo of the {}",
418
- "a photo of one {}",
419
- "a close-up photo of the {}",
420
- "a rendition of the {}",
421
- "a photo of the clean {}",
422
- "a rendition of a {}",
423
- "a photo of a nice {}",
424
- "a good photo of a {}",
425
- "a photo of the nice {}",
426
- "a photo of the small {}",
427
- "a photo of the weird {}",
428
- "a photo of the large {}",
429
- "a photo of a cool {}",
430
- "a photo of a small {}",
431
- ]
432
-
433
- imagenet_style_templates_small = [
434
- "a painting in the style of {}",
435
- "a rendering in the style of {}",
436
- "a cropped painting in the style of {}",
437
- "the painting in the style of {}",
438
- "a clean painting in the style of {}",
439
- "a dirty painting in the style of {}",
440
- "a dark painting in the style of {}",
441
- "a picture in the style of {}",
442
- "a cool painting in the style of {}",
443
- "a close-up painting in the style of {}",
444
- "a bright painting in the style of {}",
445
- "a cropped painting in the style of {}",
446
- "a good painting in the style of {}",
447
- "a close-up painting in the style of {}",
448
- "a rendition in the style of {}",
449
- "a nice painting in the style of {}",
450
- "a small painting in the style of {}",
451
- "a weird painting in the style of {}",
452
- "a large painting in the style of {}",
453
- ]
454
-
455
-
456
- class TextualInversionDataset(Dataset):
457
- def __init__(
458
- self,
459
- data_root,
460
- tokenizer,
461
- learnable_property="object", # [object, style]
462
- size=512,
463
- repeats=100,
464
- interpolation="bicubic",
465
- flip_p=0.5,
466
- set="train",
467
- placeholder_token="*",
468
- center_crop=False,
469
- vector_shuffle=False,
470
- progressive_tokens=False,
471
- ):
472
- self.data_root = data_root
473
- self.tokenizer = tokenizer
474
- self.learnable_property = learnable_property
475
- self.size = size
476
- self.placeholder_token = placeholder_token
477
- self.center_crop = center_crop
478
- self.flip_p = flip_p
479
- self.vector_shuffle = vector_shuffle
480
- self.progressive_tokens = progressive_tokens
481
- self.prop_tokens_to_load = 0
482
-
483
- self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
484
-
485
- self.num_images = len(self.image_paths)
486
- self._length = self.num_images
487
-
488
- if set == "train":
489
- self._length = self.num_images * repeats
490
-
491
- self.interpolation = {
492
- "linear": PIL_INTERPOLATION["linear"],
493
- "bilinear": PIL_INTERPOLATION["bilinear"],
494
- "bicubic": PIL_INTERPOLATION["bicubic"],
495
- "lanczos": PIL_INTERPOLATION["lanczos"],
496
- }[interpolation]
497
-
498
- self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
499
- self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
500
-
501
- def __len__(self):
502
- return self._length
503
-
504
- def __getitem__(self, i):
505
- example = {}
506
- image = Image.open(self.image_paths[i % self.num_images])
507
-
508
- if not image.mode == "RGB":
509
- image = image.convert("RGB")
510
-
511
- placeholder_string = self.placeholder_token
512
- text = random.choice(self.templates).format(placeholder_string)
513
-
514
- example["input_ids"] = self.tokenizer.encode(
515
- text,
516
- padding="max_length",
517
- truncation=True,
518
- max_length=self.tokenizer.model_max_length,
519
- return_tensors="pt",
520
- vector_shuffle=self.vector_shuffle,
521
- prop_tokens_to_load=self.prop_tokens_to_load if self.progressive_tokens else 1.0,
522
- )[0]
523
-
524
- # default to score-sde preprocessing
525
- img = np.array(image).astype(np.uint8)
526
-
527
- if self.center_crop:
528
- crop = min(img.shape[0], img.shape[1])
529
- (
530
- h,
531
- w,
532
- ) = (
533
- img.shape[0],
534
- img.shape[1],
535
- )
536
- img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]
537
-
538
- image = Image.fromarray(img)
539
- image = image.resize((self.size, self.size), resample=self.interpolation)
540
-
541
- image = self.flip_transform(image)
542
- image = np.array(image).astype(np.uint8)
543
- image = (image / 127.5 - 1.0).astype(np.float32)
544
-
545
- example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
546
- return example
547
-
548
-
549
- def main():
550
- args = parse_args()
551
- logging_dir = os.path.join(args.output_dir, args.logging_dir)
552
- accelerator_project_config = ProjectConfiguration(
553
- total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir
554
- )
555
-
556
- accelerator = Accelerator(
557
- gradient_accumulation_steps=args.gradient_accumulation_steps,
558
- mixed_precision=args.mixed_precision,
559
- log_with=args.report_to,
560
- project_config=accelerator_project_config,
561
- )
562
-
563
- if args.report_to == "wandb":
564
- if not is_wandb_available():
565
- raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
566
- import wandb
567
-
568
- # Make one log on every process with the configuration for debugging.
569
- logging.basicConfig(
570
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
571
- datefmt="%m/%d/%Y %H:%M:%S",
572
- level=logging.INFO,
573
- )
574
- logger.info(accelerator.state, main_process_only=False)
575
- if accelerator.is_local_main_process:
576
- transformers.utils.logging.set_verbosity_warning()
577
- diffusers.utils.logging.set_verbosity_info()
578
- else:
579
- transformers.utils.logging.set_verbosity_error()
580
- diffusers.utils.logging.set_verbosity_error()
581
-
582
- # If passed along, set the training seed now.
583
- if args.seed is not None:
584
- set_seed(args.seed)
585
-
586
- # Handle the repository creation
587
- if accelerator.is_main_process:
588
- if args.output_dir is not None:
589
- os.makedirs(args.output_dir, exist_ok=True)
590
-
591
- if args.push_to_hub:
592
- repo_id = create_repo(
593
- repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
594
- ).repo_id
595
-
596
- # Load tokenizer
597
- if args.tokenizer_name:
598
- tokenizer = MultiTokenCLIPTokenizer.from_pretrained(args.tokenizer_name)
599
- elif args.pretrained_model_name_or_path:
600
- tokenizer = MultiTokenCLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
601
-
602
- # Load scheduler and models
603
- noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
604
- text_encoder = CLIPTextModel.from_pretrained(
605
- args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
606
- )
607
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
608
- unet = UNet2DConditionModel.from_pretrained(
609
- args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
610
- )
611
- if is_xformers_available():
612
- try:
613
- unet.enable_xformers_memory_efficient_attention()
614
- except Exception as e:
615
- logger.warning(
616
- "Could not enable memory efficient attention. Make sure xformers is installed"
617
- f" correctly and a GPU is available: {e}"
618
- )
619
- add_tokens(tokenizer, text_encoder, args.placeholder_token, args.num_vec_per_token, args.initializer_token)
620
-
621
- # Freeze vae and unet
622
- vae.requires_grad_(False)
623
- unet.requires_grad_(False)
624
- # Freeze all parameters except for the token embeddings in text encoder
625
- text_encoder.text_model.encoder.requires_grad_(False)
626
- text_encoder.text_model.final_layer_norm.requires_grad_(False)
627
- text_encoder.text_model.embeddings.position_embedding.requires_grad_(False)
628
-
629
- if args.gradient_checkpointing:
630
- # Keep unet in train mode if we are using gradient checkpointing to save memory.
631
- # The dropout cannot be != 0 so it doesn't matter if we are in eval or train mode.
632
- unet.train()
633
- text_encoder.gradient_checkpointing_enable()
634
- unet.enable_gradient_checkpointing()
635
-
636
- if args.enable_xformers_memory_efficient_attention:
637
- if is_xformers_available():
638
- import xformers
639
-
640
- xformers_version = version.parse(xformers.__version__)
641
- if xformers_version == version.parse("0.0.16"):
642
- logger.warn(
643
- "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
644
- )
645
- unet.enable_xformers_memory_efficient_attention()
646
- else:
647
- raise ValueError("xformers is not available. Make sure it is installed correctly")
648
-
649
- # Enable TF32 for faster training on Ampere GPUs,
650
- # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
651
- if args.allow_tf32:
652
- torch.backends.cuda.matmul.allow_tf32 = True
653
-
654
- if args.scale_lr:
655
- args.learning_rate = (
656
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
657
- )
658
-
659
- # Initialize the optimizer
660
- optimizer = torch.optim.AdamW(
661
- text_encoder.get_input_embeddings().parameters(), # only optimize the embeddings
662
- lr=args.learning_rate,
663
- betas=(args.adam_beta1, args.adam_beta2),
664
- weight_decay=args.adam_weight_decay,
665
- eps=args.adam_epsilon,
666
- )
667
-
668
- # Dataset and DataLoaders creation:
669
- train_dataset = TextualInversionDataset(
670
- data_root=args.train_data_dir,
671
- tokenizer=tokenizer,
672
- size=args.resolution,
673
- placeholder_token=args.placeholder_token,
674
- repeats=args.repeats,
675
- learnable_property=args.learnable_property,
676
- center_crop=args.center_crop,
677
- set="train",
678
- )
679
- train_dataloader = torch.utils.data.DataLoader(
680
- train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers
681
- )
682
-
683
- # Scheduler and math around the number of training steps.
684
- overrode_max_train_steps = False
685
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
686
- if args.max_train_steps is None:
687
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
688
- overrode_max_train_steps = True
689
-
690
- lr_scheduler = get_scheduler(
691
- args.lr_scheduler,
692
- optimizer=optimizer,
693
- num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
694
- num_training_steps=args.max_train_steps * accelerator.num_processes,
695
- )
696
-
697
- # Prepare everything with our `accelerator`.
698
- text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
699
- text_encoder, optimizer, train_dataloader, lr_scheduler
700
- )
701
-
702
- # For mixed precision training we cast the unet and vae weights to half-precision
703
- # as these models are only used for inference, keeping weights in full precision is not required.
704
- weight_dtype = torch.float32
705
- if accelerator.mixed_precision == "fp16":
706
- weight_dtype = torch.float16
707
- elif accelerator.mixed_precision == "bf16":
708
- weight_dtype = torch.bfloat16
709
-
710
- # Move vae and unet to device and cast to weight_dtype
711
- unet.to(accelerator.device, dtype=weight_dtype)
712
- vae.to(accelerator.device, dtype=weight_dtype)
713
-
714
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
715
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
716
- if overrode_max_train_steps:
717
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
718
- # Afterwards we recalculate our number of training epochs
719
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
720
-
721
- # We need to initialize the trackers we use, and also store our configuration.
722
- # The trackers initializes automatically on the main process.
723
- if accelerator.is_main_process:
724
- accelerator.init_trackers("textual_inversion", config=vars(args))
725
-
726
- # Train!
727
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
728
-
729
- logger.info("***** Running training *****")
730
- logger.info(f" Num examples = {len(train_dataset)}")
731
- logger.info(f" Num Epochs = {args.num_train_epochs}")
732
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
733
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
734
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
735
- logger.info(f" Total optimization steps = {args.max_train_steps}")
736
- global_step = 0
737
- first_epoch = 0
738
-
739
- # Potentially load in the weights and states from a previous save
740
- if args.resume_from_checkpoint:
741
- if args.resume_from_checkpoint != "latest":
742
- path = os.path.basename(args.resume_from_checkpoint)
743
- else:
744
- # Get the most recent checkpoint
745
- dirs = os.listdir(args.output_dir)
746
- dirs = [d for d in dirs if d.startswith("checkpoint")]
747
- dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
748
- path = dirs[-1] if len(dirs) > 0 else None
749
-
750
- if path is None:
751
- accelerator.print(
752
- f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
753
- )
754
- args.resume_from_checkpoint = None
755
- else:
756
- accelerator.print(f"Resuming from checkpoint {path}")
757
- accelerator.load_state(os.path.join(args.output_dir, path))
758
- global_step = int(path.split("-")[1])
759
-
760
- resume_global_step = global_step * args.gradient_accumulation_steps
761
- first_epoch = global_step // num_update_steps_per_epoch
762
- resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
763
-
764
- # Only show the progress bar once on each machine.
765
- progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
766
- progress_bar.set_description("Steps")
767
-
768
- # keep original embeddings as reference
769
- orig_embeds_params = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight.data.clone()
770
-
771
- for epoch in range(first_epoch, args.num_train_epochs):
772
- text_encoder.train()
773
- for step, batch in enumerate(train_dataloader):
774
- # Skip steps until we reach the resumed step
775
- if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
776
- if step % args.gradient_accumulation_steps == 0:
777
- progress_bar.update(1)
778
- continue
779
- if args.progressive_tokens:
780
- train_dataset.prop_tokens_to_load = float(global_step) / args.progressive_tokens_max_steps
781
-
782
- with accelerator.accumulate(text_encoder):
783
- # Convert images to latent space
784
- latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample().detach()
785
- latents = latents * vae.config.scaling_factor
786
-
787
- # Sample noise that we'll add to the latents
788
- noise = torch.randn_like(latents)
789
- bsz = latents.shape[0]
790
- # Sample a random timestep for each image
791
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
792
- timesteps = timesteps.long()
793
-
794
- # Add noise to the latents according to the noise magnitude at each timestep
795
- # (this is the forward diffusion process)
796
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
797
-
798
- # Get the text embedding for conditioning
799
- encoder_hidden_states = text_encoder(batch["input_ids"])[0].to(dtype=weight_dtype)
800
-
801
- # Predict the noise residual
802
- model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
803
-
804
- # Get the target for loss depending on the prediction type
805
- if noise_scheduler.config.prediction_type == "epsilon":
806
- target = noise
807
- elif noise_scheduler.config.prediction_type == "v_prediction":
808
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
809
- else:
810
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
811
-
812
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
813
-
814
- accelerator.backward(loss)
815
-
816
- optimizer.step()
817
- lr_scheduler.step()
818
- optimizer.zero_grad()
819
-
820
- # Let's make sure we don't update any embedding weights besides the newly added token
821
- index_no_updates = get_mask(tokenizer, accelerator)
822
- with torch.no_grad():
823
- accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[
824
- index_no_updates
825
- ] = orig_embeds_params[index_no_updates]
826
-
827
- # Checks if the accelerator has performed an optimization step behind the scenes
828
- if accelerator.sync_gradients:
829
- progress_bar.update(1)
830
- global_step += 1
831
- if global_step % args.save_steps == 0:
832
- save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin")
833
- save_progress(tokenizer, text_encoder, accelerator, save_path)
834
-
835
- if global_step % args.checkpointing_steps == 0:
836
- if accelerator.is_main_process:
837
- save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
838
- accelerator.save_state(save_path)
839
- logger.info(f"Saved state to {save_path}")
840
-
841
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
842
- progress_bar.set_postfix(**logs)
843
- accelerator.log(logs, step=global_step)
844
-
845
- if global_step >= args.max_train_steps:
846
- break
847
-
848
- if accelerator.is_main_process and args.validation_prompt is not None and epoch % args.validation_epochs == 0:
849
- logger.info(
850
- f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
851
- f" {args.validation_prompt}."
852
- )
853
- # create pipeline (note: unet and vae are loaded again in float32)
854
- pipeline = DiffusionPipeline.from_pretrained(
855
- args.pretrained_model_name_or_path,
856
- text_encoder=accelerator.unwrap_model(text_encoder),
857
- tokenizer=tokenizer,
858
- unet=unet,
859
- vae=vae,
860
- revision=args.revision,
861
- torch_dtype=weight_dtype,
862
- )
863
- pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
864
- pipeline = pipeline.to(accelerator.device)
865
- pipeline.set_progress_bar_config(disable=True)
866
-
867
- # run inference
868
- generator = (
869
- None if args.seed is None else torch.Generator(device=accelerator.device).manual_seed(args.seed)
870
- )
871
- images = []
872
- for _ in range(args.num_validation_images):
873
- with torch.autocast("cuda"):
874
- image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
875
- images.append(image)
876
-
877
- for tracker in accelerator.trackers:
878
- if tracker.name == "tensorboard":
879
- np_images = np.stack([np.asarray(img) for img in images])
880
- tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
881
- if tracker.name == "wandb":
882
- tracker.log(
883
- {
884
- "validation": [
885
- wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
886
- for i, image in enumerate(images)
887
- ]
888
- }
889
- )
890
-
891
- del pipeline
892
- torch.cuda.empty_cache()
893
-
894
- # Create the pipeline using using the trained modules and save it.
895
- accelerator.wait_for_everyone()
896
- if accelerator.is_main_process:
897
- if args.push_to_hub and args.only_save_embeds:
898
- logger.warn("Enabling full model saving because --push_to_hub=True was specified.")
899
- save_full_model = True
900
- else:
901
- save_full_model = not args.only_save_embeds
902
- if save_full_model:
903
- pipeline = StableDiffusionPipeline.from_pretrained(
904
- args.pretrained_model_name_or_path,
905
- text_encoder=accelerator.unwrap_model(text_encoder),
906
- vae=vae,
907
- unet=unet,
908
- tokenizer=tokenizer,
909
- )
910
- pipeline.save_pretrained(args.output_dir)
911
- # Save the newly trained embeddings
912
- save_path = os.path.join(args.output_dir, "learned_embeds.bin")
913
- save_progress(tokenizer, text_encoder, accelerator, save_path)
914
-
915
- if args.push_to_hub:
916
- upload_folder(
917
- repo_id=repo_id,
918
- folder_path=args.output_dir,
919
- commit_message="End of training",
920
- ignore_patterns=["step_*", "epoch_*"],
921
- )
922
-
923
- accelerator.end_training()
924
-
925
-
926
- if __name__ == "__main__":
927
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_lms_discrete_flax.py DELETED
@@ -1,283 +0,0 @@
1
- # Copyright 2023 Katherine Crowson and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from dataclasses import dataclass
16
- from typing import Optional, Tuple, Union
17
-
18
- import flax
19
- import jax.numpy as jnp
20
- from scipy import integrate
21
-
22
- from ..configuration_utils import ConfigMixin, register_to_config
23
- from .scheduling_utils_flax import (
24
- CommonSchedulerState,
25
- FlaxKarrasDiffusionSchedulers,
26
- FlaxSchedulerMixin,
27
- FlaxSchedulerOutput,
28
- broadcast_to_shape_from_left,
29
- )
30
-
31
-
32
@flax.struct.dataclass
class LMSDiscreteSchedulerState:
    """Functionally-updated state carried through the Flax LMS sampling loop."""

    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    sigmas: jnp.ndarray
    num_inference_steps: Optional[int] = None

    # running values (history of ODE derivatives used by the multistep update)
    derivatives: Optional[jnp.ndarray] = None

    @classmethod
    def create(
        cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray, sigmas: jnp.ndarray
    ):
        """Build a fresh state; running values start unset."""
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, sigmas=sigmas)
50
-
51
-
52
@dataclass
class FlaxLMSSchedulerOutput(FlaxSchedulerOutput):
    """Scheduler step output that additionally carries the updated LMS state."""

    state: LMSDiscreteSchedulerState
55
-
56
-
57
class FlaxLMSDiscreteScheduler(FlaxSchedulerMixin, ConfigMixin):
    """
    Linear multistep (K-LMS) scheduler for discrete beta schedules, in Flax/JAX.

    Based on Katherine Crowson's original k-diffusion implementation:
    https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181

    [`~ConfigMixin`] stores all config attributes passed to `__init__` (accessible via
    `scheduler.config.*`); [`SchedulerMixin`] provides `save_pretrained`/`from_pretrained`.

    Args:
        num_train_timesteps (`int`): number of diffusion steps used to train the model.
        beta_start (`float`): the starting `beta` value of inference.
        beta_end (`float`): the final `beta` value.
        beta_schedule (`str`): beta schedule mapping, `linear` or `scaled_linear`.
        trained_betas (`jnp.ndarray`, optional): explicit beta array bypassing the schedule.
        prediction_type (`str`, default `epsilon`, optional): `epsilon` (predict noise),
            `sample` (predict the noisy sample) or `v_prediction` (see section 2.4 of
            https://imagen.research.google/video/paper.pdf).
        dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`): dtype for params
            and computation.
    """

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> LMSDiscreteSchedulerState:
        """Build the initial scheduler state (full training-timestep grid)."""
        if common is None:
            common = CommonSchedulerState.create(self)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
        sigmas = ((1 - common.alphas_cumprod) / common.alphas_cumprod) ** 0.5

        # standard deviation of the initial noise distribution
        init_noise_sigma = sigmas.max()

        return LMSDiscreteSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
            sigmas=sigmas,
        )

    def scale_model_input(self, state: LMSDiscreteSchedulerState, sample: jnp.ndarray, timestep: int) -> jnp.ndarray:
        """
        Scale the denoising model input by `(sigma**2 + 1) ** 0.5` to match K-LMS.

        Args:
            state (`LMSDiscreteSchedulerState`): current scheduler state.
            sample (`jnp.ndarray`): current sample in the diffusion process.
            timestep (`int`): current discrete timestep in the diffusion chain.

        Returns:
            `jnp.ndarray`: scaled input sample
        """
        (step_index,) = jnp.where(state.timesteps == timestep, size=1)
        step_index = step_index[0]

        sigma = state.sigmas[step_index]
        return sample / ((sigma**2 + 1) ** 0.5)

    def get_lms_coefficient(self, state: LMSDiscreteSchedulerState, order, t, current_order):
        """
        Compute one linear-multistep coefficient by numerically integrating the
        Lagrange basis polynomial over [sigma_t, sigma_{t+1}].
        """

        def lms_derivative(tau):
            prod = 1.0
            for k in range(order):
                if current_order == k:
                    continue
                prod *= (tau - state.sigmas[t - k]) / (state.sigmas[t - current_order] - state.sigmas[t - k])
            return prod

        integrated_coeff = integrate.quad(lms_derivative, state.sigmas[t], state.sigmas[t + 1], epsrel=1e-4)[0]

        return integrated_coeff

    def set_timesteps(
        self, state: LMSDiscreteSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> LMSDiscreteSchedulerState:
        """
        Set the inference timestep grid; run before sampling.

        Args:
            state (`LMSDiscreteSchedulerState`): current scheduler state.
            num_inference_steps (`int`): number of diffusion steps used at inference.
        """

        timesteps = jnp.linspace(self.config.num_train_timesteps - 1, 0, num_inference_steps, dtype=self.dtype)

        # Linearly interpolate sigmas at the (generally fractional) timesteps.
        low_idx = jnp.floor(timesteps).astype(jnp.int32)
        high_idx = jnp.ceil(timesteps).astype(jnp.int32)

        frac = jnp.mod(timesteps, 1.0)

        sigmas = ((1 - state.common.alphas_cumprod) / state.common.alphas_cumprod) ** 0.5
        sigmas = (1 - frac) * sigmas[low_idx] + frac * sigmas[high_idx]
        sigmas = jnp.concatenate([sigmas, jnp.array([0.0], dtype=self.dtype)])

        timesteps = timesteps.astype(jnp.int32)

        # initial running values: empty derivative history
        derivatives = jnp.zeros((0,) + shape, dtype=self.dtype)

        return state.replace(
            timesteps=timesteps,
            sigmas=sigmas,
            num_inference_steps=num_inference_steps,
            derivatives=derivatives,
        )

    def step(
        self,
        state: LMSDiscreteSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        order: int = 4,
        return_dict: bool = True,
    ) -> Union[FlaxLMSSchedulerOutput, Tuple]:
        """
        Predict the sample at the previous timestep by reversing the SDE.

        Args:
            state (`LMSDiscreteSchedulerState`): current scheduler state.
            model_output (`jnp.ndarray`): direct output from the learned diffusion model.
            timestep (`int`): current discrete timestep in the diffusion chain.
            sample (`jnp.ndarray`): current sample in the diffusion process.
            order: coefficient for multi-step inference.
            return_dict (`bool`): return [`FlaxLMSSchedulerOutput`] instead of a tuple.

        Returns:
            [`FlaxLMSSchedulerOutput`] or `tuple`; the first tuple element is the sample.
        """
        if state.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        sigma = state.sigmas[timestep]

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            pred_original_sample = sample - sigma * model_output
        elif self.config.prediction_type == "v_prediction":
            # * c_out + input * c_skip
            pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1))
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        # 2. Convert to an ODE derivative and keep at most `order` of them.
        derivative = (sample - pred_original_sample) / sigma
        state = state.replace(derivatives=jnp.append(state.derivatives, derivative))
        if len(state.derivatives) > order:
            state = state.replace(derivatives=jnp.delete(state.derivatives, 0))

        # 3. Compute linear multistep coefficients
        order = min(timestep + 1, order)
        lms_coeffs = [self.get_lms_coefficient(state, order, timestep, curr_order) for curr_order in range(order)]

        # 4. Compute previous sample based on the derivatives path
        prev_sample = sample + sum(
            coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(state.derivatives))
        )

        if not return_dict:
            return (prev_sample, state)

        return FlaxLMSSchedulerOutput(prev_sample=prev_sample, state=state)

    def add_noise(
        self,
        state: LMSDiscreteSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        """Diffuse `original_samples` forward: x_t = x_0 + sigma_t * noise."""
        sigma = state.sigmas[timesteps].flatten()
        sigma = broadcast_to_shape_from_left(sigma, noise.shape)

        return original_samples + noise * sigma

    def __len__(self):
        return self.config.num_train_timesteps
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/repaint/__init__.py DELETED
File without changes
spaces/Andy1621/uniformer_image_detection/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py DELETED
@@ -1,16 +0,0 @@
1
# VFNet with a ResNeXt-101 64x4d backbone, DCNv2 in stages C3-C5,
# multi-scale training and a 2x schedule. Everything not listed here is
# inherited from the R50 base config.
_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'

model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        # Freeze the stem and first stage; BN affine params stay trainable.
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        # Deformable conv in stages 2-4 only (C3-C5).
        stage_with_dcn=(False, True, True, True),
    ),
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/emanet/emanet_r50-d8_769x769_80k_cityscapes.py DELETED
@@ -1,9 +0,0 @@
1
# EMANet R50-D8 on Cityscapes at 769x769 with the 80k-iteration schedule.
_base_ = [
    '../_base_/models/emanet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py',
]

# 769x769 crops use align_corners=True and sliding-window inference.
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)),
)
 
 
 
 
 
 
 
 
 
 
spaces/Andyrasika/Andyrasika-lora_diffusion/app.py DELETED
@@ -1,3 +0,0 @@
1
import gradio as gr

# Wrap the hosted Hugging Face model in a Gradio interface and serve it.
demo = gr.Interface.load("models/Andyrasika/lora_diffusion")
demo.launch()
 
 
 
 
spaces/Arthur678/vits-uma-genshin-honkai/commons.py DELETED
@@ -1,172 +0,0 @@
1
import math
import torch
from torch.nn import functional as F
import torch.jit


def script_method(fn, _rcb=None):
    """No-op replacement for torch.jit.script_method (disables TorchScript)."""
    return fn


def script(obj, optimize=True, _frames_up=0, _rcb=None):
    """No-op replacement for torch.jit.script (disables TorchScript)."""
    return obj


# Monkey-patch TorchScript so @torch.jit.script-decorated functions below run
# as plain Python (avoids JIT compilation issues in inference-only deployments).
torch.jit.script_method = script_method
torch.jit.script = script


def init_weights(m, mean=0.0, std=0.01):
    """Initialize Conv* layer weights in-place with a normal distribution."""
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(mean, std)


def get_padding(kernel_size, dilation=1):
    """Return the padding that keeps a dilated conv output the same length."""
    return int((kernel_size * dilation - dilation) / 2)


def convert_pad_shape(pad_shape):
    """Flatten a [[left, right], ...] pad spec (outermost dim first) into the
    reversed flat list that ``F.pad`` expects (innermost dim first)."""
    reversed_spec = pad_shape[::-1]
    return [item for pair in reversed_spec for item in pair]


def intersperse(lst, item):
    """Return lst with `item` inserted between, before, and after all elements."""
    result = [item] * (len(lst) * 2 + 1)
    result[1::2] = lst
    return result


def kl_divergence(m_p, logs_p, m_q, logs_q):
    """KL(P||Q) between diagonal Gaussians given means and log-stddevs."""
    kl = (logs_q - logs_p) - 0.5
    kl += 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q)
    return kl


def rand_gumbel(shape):
    """Sample from the Gumbel distribution, protected from overflows."""
    # Shift uniform samples away from 0 and 1 so log(-log(u)) stays finite.
    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
    return -torch.log(-torch.log(uniform_samples))


def rand_gumbel_like(x):
    """Gumbel sample with the same shape, dtype and device as `x`."""
    return rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)


def slice_segments(x, ids_str, segment_size=4):
    """Gather per-batch segments x[i, :, ids_str[i]:ids_str[i]+segment_size]."""
    ret = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        idx_str = ids_str[i]
        idx_end = idx_str + segment_size
        ret[i] = x[i, :, idx_str:idx_end]
    return ret


def rand_slice_segments(x, x_lengths=None, segment_size=4):
    """Randomly slice one segment per batch element.

    Returns:
        (segments, start_ids): segments is (b, d, segment_size); start_ids
        holds the sampled start index for each batch element.
    """
    b, d, t = x.size()
    if x_lengths is None:
        x_lengths = t
    ids_str_max = x_lengths - segment_size + 1
    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
    ret = slice_segments(x, ids_str, segment_size)
    return ret, ids_str


def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    """Sinusoidal positional-encoding signal of shape (1, channels, length)."""
    position = torch.arange(length, dtype=torch.float)
    num_timescales = channels // 2
    log_timescale_increment = (
        math.log(float(max_timescale) / float(min_timescale)) / (num_timescales - 1)
    )
    inv_timescales = min_timescale * torch.exp(
        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment
    )
    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
    # Pad one zero channel when `channels` is odd.
    signal = F.pad(signal, [0, 0, 0, channels % 2])
    return signal.view(1, channels, length)


def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
    """Add a sinusoidal timing signal to `x` of shape (b, channels, length)."""
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return x + signal.to(dtype=x.dtype, device=x.device)


def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
    """Concatenate a sinusoidal timing signal to `x` along `axis`."""
    b, channels, length = x.size()
    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)


def subsequent_mask(length):
    """Lower-triangular (causal) mask of shape (1, 1, length, length)."""
    return torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)


@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
    """WaveNet-style gated activation: tanh/sigmoid gating on input_a + input_b."""
    n_channels_int = n_channels[0]
    in_act = input_a + input_b
    t_act = torch.tanh(in_act[:, :n_channels_int, :])
    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
    return t_act * s_act


def shift_1d(x):
    """Shift `x` right by one along the last dim, zero-padding the front."""
    return F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]


def sequence_mask(length, max_length=None):
    """Boolean mask of shape (b, max_length): True where position < length[b]."""
    if max_length is None:
        max_length = length.max()
    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return x.unsqueeze(0) < length.unsqueeze(1)


def generate_path(duration, mask):
    """Build a monotonic alignment path from per-token durations.

    Args:
        duration: [b, 1, t_x]
        mask: [b, 1, t_y, t_x]

    Returns:
        Path tensor of shape [b, 1, t_y, t_x].
    """
    b, _, t_y, t_x = mask.shape
    cum_duration = torch.cumsum(duration, -1)

    cum_duration_flat = cum_duration.view(b * t_x)
    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
    path = path.view(b, t_x, t_y)
    # Subtract the mask shifted by one token so each row covers only its own span.
    path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
    return path.unsqueeze(1).transpose(2, 3) * mask


def clip_grad_value_(parameters, clip_value, norm_type=2):
    """Clamp gradients in-place to [-clip_value, clip_value].

    Returns the total gradient norm (computed before clamping), like
    torch.nn.utils.clip_grad_norm_ would report it.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = list(filter(lambda p: p.grad is not None, parameters))
    norm_type = float(norm_type)
    if clip_value is not None:
        clip_value = float(clip_value)

    total_norm = 0
    for p in parameters:
        param_norm = p.grad.data.norm(norm_type)
        total_norm += param_norm.item() ** norm_type
        if clip_value is not None:
            p.grad.data.clamp_(min=-clip_value, max=clip_value)
    return total_norm ** (1.0 / norm_type)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/__init__.py DELETED
@@ -1,18 +0,0 @@
1
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .GroundingDINO import build_groundingdino


def build_model(args):
    """Construct the model registered under ``args.modelname``."""
    # Models from catdet6 onward are maintained through the build registry;
    # import lazily to avoid a circular import at module load time.
    from .registry import MODULE_BUILD_FUNCS

    assert args.modelname in MODULE_BUILD_FUNCS._module_dict
    builder = MODULE_BUILD_FUNCS.get(args.modelname)
    return builder(args)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/connectionpool.py DELETED
@@ -1,1110 +0,0 @@
1
- from __future__ import absolute_import
2
-
3
- import errno
4
- import logging
5
- import re
6
- import socket
7
- import sys
8
- import warnings
9
- from socket import error as SocketError
10
- from socket import timeout as SocketTimeout
11
-
12
- from .connection import (
13
- BaseSSLError,
14
- BrokenPipeError,
15
- DummyConnection,
16
- HTTPConnection,
17
- HTTPException,
18
- HTTPSConnection,
19
- VerifiedHTTPSConnection,
20
- port_by_scheme,
21
- )
22
- from .exceptions import (
23
- ClosedPoolError,
24
- EmptyPoolError,
25
- HeaderParsingError,
26
- HostChangedError,
27
- InsecureRequestWarning,
28
- LocationValueError,
29
- MaxRetryError,
30
- NewConnectionError,
31
- ProtocolError,
32
- ProxyError,
33
- ReadTimeoutError,
34
- SSLError,
35
- TimeoutError,
36
- )
37
- from .packages import six
38
- from .packages.six.moves import queue
39
- from .request import RequestMethods
40
- from .response import HTTPResponse
41
- from .util.connection import is_connection_dropped
42
- from .util.proxy import connection_requires_http_tunnel
43
- from .util.queue import LifoQueue
44
- from .util.request import set_file_position
45
- from .util.response import assert_header_parsing
46
- from .util.retry import Retry
47
- from .util.ssl_match_hostname import CertificateError
48
- from .util.timeout import Timeout
49
- from .util.url import Url, _encode_target
50
- from .util.url import _normalize_host as normalize_host
51
- from .util.url import get_host, parse_url
52
-
53
- xrange = six.moves.xrange
54
-
55
- log = logging.getLogger(__name__)
56
-
57
- _Default = object()
58
-
59
-
60
- # Pool objects
61
- class ConnectionPool(object):
62
- """
63
- Base class for all connection pools, such as
64
- :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
65
-
66
- .. note::
67
- ConnectionPool.urlopen() does not normalize or percent-encode target URIs
68
- which is useful if your target server doesn't support percent-encoded
69
- target URIs.
70
- """
71
-
72
- scheme = None
73
- QueueCls = LifoQueue
74
-
75
- def __init__(self, host, port=None):
76
- if not host:
77
- raise LocationValueError("No host specified.")
78
-
79
- self.host = _normalize_host(host, scheme=self.scheme)
80
- self._proxy_host = host.lower()
81
- self.port = port
82
-
83
- def __str__(self):
84
- return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)
85
-
86
- def __enter__(self):
87
- return self
88
-
89
- def __exit__(self, exc_type, exc_val, exc_tb):
90
- self.close()
91
- # Return False to re-raise any potential exceptions
92
- return False
93
-
94
- def close(self):
95
- """
96
- Close all pooled connections and disable the pool.
97
- """
98
- pass
99
-
100
-
101
- # This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
102
- _blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
103
-
104
-
105
- class HTTPConnectionPool(ConnectionPool, RequestMethods):
106
- """
107
- Thread-safe connection pool for one host.
108
-
109
- :param host:
110
- Host used for this HTTP Connection (e.g. "localhost"), passed into
111
- :class:`http.client.HTTPConnection`.
112
-
113
- :param port:
114
- Port used for this HTTP Connection (None is equivalent to 80), passed
115
- into :class:`http.client.HTTPConnection`.
116
-
117
- :param strict:
118
- Causes BadStatusLine to be raised if the status line can't be parsed
119
- as a valid HTTP/1.0 or 1.1 status line, passed into
120
- :class:`http.client.HTTPConnection`.
121
-
122
- .. note::
123
- Only works in Python 2. This parameter is ignored in Python 3.
124
-
125
- :param timeout:
126
- Socket timeout in seconds for each individual connection. This can
127
- be a float or integer, which sets the timeout for the HTTP request,
128
- or an instance of :class:`urllib3.util.Timeout` which gives you more
129
- fine-grained control over request timeouts. After the constructor has
130
- been parsed, this is always a `urllib3.util.Timeout` object.
131
-
132
- :param maxsize:
133
- Number of connections to save that can be reused. More than 1 is useful
134
- in multithreaded situations. If ``block`` is set to False, more
135
- connections will be created but they will not be saved once they've
136
- been used.
137
-
138
- :param block:
139
- If set to True, no more than ``maxsize`` connections will be used at
140
- a time. When no free connections are available, the call will block
141
- until a connection has been released. This is a useful side effect for
142
- particular multithreaded situations where one does not want to use more
143
- than maxsize connections per host to prevent flooding.
144
-
145
- :param headers:
146
- Headers to include with all requests, unless other headers are given
147
- explicitly.
148
-
149
- :param retries:
150
- Retry configuration to use by default with requests in this pool.
151
-
152
- :param _proxy:
153
- Parsed proxy URL, should not be used directly, instead, see
154
- :class:`urllib3.ProxyManager`
155
-
156
- :param _proxy_headers:
157
- A dictionary with proxy headers, should not be used directly,
158
- instead, see :class:`urllib3.ProxyManager`
159
-
160
- :param \\**conn_kw:
161
- Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
162
- :class:`urllib3.connection.HTTPSConnection` instances.
163
- """
164
-
165
- scheme = "http"
166
- ConnectionCls = HTTPConnection
167
- ResponseCls = HTTPResponse
168
-
169
- def __init__(
170
- self,
171
- host,
172
- port=None,
173
- strict=False,
174
- timeout=Timeout.DEFAULT_TIMEOUT,
175
- maxsize=1,
176
- block=False,
177
- headers=None,
178
- retries=None,
179
- _proxy=None,
180
- _proxy_headers=None,
181
- _proxy_config=None,
182
- **conn_kw
183
- ):
184
- ConnectionPool.__init__(self, host, port)
185
- RequestMethods.__init__(self, headers)
186
-
187
- self.strict = strict
188
-
189
- if not isinstance(timeout, Timeout):
190
- timeout = Timeout.from_float(timeout)
191
-
192
- if retries is None:
193
- retries = Retry.DEFAULT
194
-
195
- self.timeout = timeout
196
- self.retries = retries
197
-
198
- self.pool = self.QueueCls(maxsize)
199
- self.block = block
200
-
201
- self.proxy = _proxy
202
- self.proxy_headers = _proxy_headers or {}
203
- self.proxy_config = _proxy_config
204
-
205
- # Fill the queue up so that doing get() on it will block properly
206
- for _ in xrange(maxsize):
207
- self.pool.put(None)
208
-
209
- # These are mostly for testing and debugging purposes.
210
- self.num_connections = 0
211
- self.num_requests = 0
212
- self.conn_kw = conn_kw
213
-
214
- if self.proxy:
215
- # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
216
- # We cannot know if the user has added default socket options, so we cannot replace the
217
- # list.
218
- self.conn_kw.setdefault("socket_options", [])
219
-
220
- self.conn_kw["proxy"] = self.proxy
221
- self.conn_kw["proxy_config"] = self.proxy_config
222
-
223
- def _new_conn(self):
224
- """
225
- Return a fresh :class:`HTTPConnection`.
226
- """
227
- self.num_connections += 1
228
- log.debug(
229
- "Starting new HTTP connection (%d): %s:%s",
230
- self.num_connections,
231
- self.host,
232
- self.port or "80",
233
- )
234
-
235
- conn = self.ConnectionCls(
236
- host=self.host,
237
- port=self.port,
238
- timeout=self.timeout.connect_timeout,
239
- strict=self.strict,
240
- **self.conn_kw
241
- )
242
- return conn
243
-
244
- def _get_conn(self, timeout=None):
245
- """
246
- Get a connection. Will return a pooled connection if one is available.
247
-
248
- If no connections are available and :prop:`.block` is ``False``, then a
249
- fresh connection is returned.
250
-
251
- :param timeout:
252
- Seconds to wait before giving up and raising
253
- :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
254
- :prop:`.block` is ``True``.
255
- """
256
- conn = None
257
- try:
258
- conn = self.pool.get(block=self.block, timeout=timeout)
259
-
260
- except AttributeError: # self.pool is None
261
- raise ClosedPoolError(self, "Pool is closed.")
262
-
263
- except queue.Empty:
264
- if self.block:
265
- raise EmptyPoolError(
266
- self,
267
- "Pool reached maximum size and no more connections are allowed.",
268
- )
269
- pass # Oh well, we'll create a new connection then
270
-
271
- # If this is a persistent connection, check if it got disconnected
272
- if conn and is_connection_dropped(conn):
273
- log.debug("Resetting dropped connection: %s", self.host)
274
- conn.close()
275
- if getattr(conn, "auto_open", 1) == 0:
276
- # This is a proxied connection that has been mutated by
277
- # http.client._tunnel() and cannot be reused (since it would
278
- # attempt to bypass the proxy)
279
- conn = None
280
-
281
- return conn or self._new_conn()
282
-
283
- def _put_conn(self, conn):
284
- """
285
- Put a connection back into the pool.
286
-
287
- :param conn:
288
- Connection object for the current host and port as returned by
289
- :meth:`._new_conn` or :meth:`._get_conn`.
290
-
291
- If the pool is already full, the connection is closed and discarded
292
- because we exceeded maxsize. If connections are discarded frequently,
293
- then maxsize should be increased.
294
-
295
- If the pool is closed, then the connection will be closed and discarded.
296
- """
297
- try:
298
- self.pool.put(conn, block=False)
299
- return # Everything is dandy, done.
300
- except AttributeError:
301
- # self.pool is None.
302
- pass
303
- except queue.Full:
304
- # This should never happen if self.block == True
305
- log.warning(
306
- "Connection pool is full, discarding connection: %s. Connection pool size: %s",
307
- self.host,
308
- self.pool.qsize(),
309
- )
310
- # Connection never got put back into the pool, close it.
311
- if conn:
312
- conn.close()
313
-
314
- def _validate_conn(self, conn):
315
- """
316
- Called right before a request is made, after the socket is created.
317
- """
318
- pass
319
-
320
- def _prepare_proxy(self, conn):
321
- # Nothing to do for HTTP connections.
322
- pass
323
-
324
- def _get_timeout(self, timeout):
325
- """Helper that always returns a :class:`urllib3.util.Timeout`"""
326
- if timeout is _Default:
327
- return self.timeout.clone()
328
-
329
- if isinstance(timeout, Timeout):
330
- return timeout.clone()
331
- else:
332
- # User passed us an int/float. This is for backwards compatibility,
333
- # can be removed later
334
- return Timeout.from_float(timeout)
335
-
336
- def _raise_timeout(self, err, url, timeout_value):
337
- """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
338
-
339
- if isinstance(err, SocketTimeout):
340
- raise ReadTimeoutError(
341
- self, url, "Read timed out. (read timeout=%s)" % timeout_value
342
- )
343
-
344
- # See the above comment about EAGAIN in Python 3. In Python 2 we have
345
- # to specifically catch it and throw the timeout error
346
- if hasattr(err, "errno") and err.errno in _blocking_errnos:
347
- raise ReadTimeoutError(
348
- self, url, "Read timed out. (read timeout=%s)" % timeout_value
349
- )
350
-
351
- # Catch possible read timeouts thrown as SSL errors. If not the
352
- # case, rethrow the original. We need to do this because of:
353
- # http://bugs.python.org/issue10272
354
- if "timed out" in str(err) or "did not complete (read)" in str(
355
- err
356
- ): # Python < 2.7.4
357
- raise ReadTimeoutError(
358
- self, url, "Read timed out. (read timeout=%s)" % timeout_value
359
- )
360
-
361
- def _make_request(
362
- self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
363
- ):
364
- """
365
- Perform a request on a given urllib connection object taken from our
366
- pool.
367
-
368
- :param conn:
369
- a connection from one of our connection pools
370
-
371
- :param timeout:
372
- Socket timeout in seconds for the request. This can be a
373
- float or integer, which will set the same timeout value for
374
- the socket connect and the socket read, or an instance of
375
- :class:`urllib3.util.Timeout`, which gives you more fine-grained
376
- control over your timeouts.
377
- """
378
- self.num_requests += 1
379
-
380
- timeout_obj = self._get_timeout(timeout)
381
- timeout_obj.start_connect()
382
- conn.timeout = Timeout.resolve_default_timeout(timeout_obj.connect_timeout)
383
-
384
- # Trigger any extra validation we need to do.
385
- try:
386
- self._validate_conn(conn)
387
- except (SocketTimeout, BaseSSLError) as e:
388
- # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
389
- self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
390
- raise
391
-
392
- # conn.request() calls http.client.*.request, not the method in
393
- # urllib3.request. It also calls makefile (recv) on the socket.
394
- try:
395
- if chunked:
396
- conn.request_chunked(method, url, **httplib_request_kw)
397
- else:
398
- conn.request(method, url, **httplib_request_kw)
399
-
400
- # We are swallowing BrokenPipeError (errno.EPIPE) since the server is
401
- # legitimately able to close the connection after sending a valid response.
402
- # With this behaviour, the received response is still readable.
403
- except BrokenPipeError:
404
- # Python 3
405
- pass
406
- except IOError as e:
407
- # Python 2 and macOS/Linux
408
- # EPIPE and ESHUTDOWN are BrokenPipeError on Python 2, and EPROTOTYPE is needed on macOS
409
- # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
410
- if e.errno not in {
411
- errno.EPIPE,
412
- errno.ESHUTDOWN,
413
- errno.EPROTOTYPE,
414
- }:
415
- raise
416
-
417
- # Reset the timeout for the recv() on the socket
418
- read_timeout = timeout_obj.read_timeout
419
-
420
- # App Engine doesn't have a sock attr
421
- if getattr(conn, "sock", None):
422
- # In Python 3 socket.py will catch EAGAIN and return None when you
423
- # try and read into the file pointer created by http.client, which
424
- # instead raises a BadStatusLine exception. Instead of catching
425
- # the exception and assuming all BadStatusLine exceptions are read
426
- # timeouts, check for a zero timeout before making the request.
427
- if read_timeout == 0:
428
- raise ReadTimeoutError(
429
- self, url, "Read timed out. (read timeout=%s)" % read_timeout
430
- )
431
- if read_timeout is Timeout.DEFAULT_TIMEOUT:
432
- conn.sock.settimeout(socket.getdefaulttimeout())
433
- else: # None or a value
434
- conn.sock.settimeout(read_timeout)
435
-
436
- # Receive the response from the server
437
- try:
438
- try:
439
- # Python 2.7, use buffering of HTTP responses
440
- httplib_response = conn.getresponse(buffering=True)
441
- except TypeError:
442
- # Python 3
443
- try:
444
- httplib_response = conn.getresponse()
445
- except BaseException as e:
446
- # Remove the TypeError from the exception chain in
447
- # Python 3 (including for exceptions like SystemExit).
448
- # Otherwise it looks like a bug in the code.
449
- six.raise_from(e, None)
450
- except (SocketTimeout, BaseSSLError, SocketError) as e:
451
- self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
452
- raise
453
-
454
- # AppEngine doesn't have a version attr.
455
- http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
456
- log.debug(
457
- '%s://%s:%s "%s %s %s" %s %s',
458
- self.scheme,
459
- self.host,
460
- self.port,
461
- method,
462
- url,
463
- http_version,
464
- httplib_response.status,
465
- httplib_response.length,
466
- )
467
-
468
- try:
469
- assert_header_parsing(httplib_response.msg)
470
- except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3
471
- log.warning(
472
- "Failed to parse headers (url=%s): %s",
473
- self._absolute_url(url),
474
- hpe,
475
- exc_info=True,
476
- )
477
-
478
- return httplib_response
479
-
480
- def _absolute_url(self, path):
481
- return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
482
-
483
- def close(self):
484
- """
485
- Close all pooled connections and disable the pool.
486
- """
487
- if self.pool is None:
488
- return
489
- # Disable access to the pool
490
- old_pool, self.pool = self.pool, None
491
-
492
- try:
493
- while True:
494
- conn = old_pool.get(block=False)
495
- if conn:
496
- conn.close()
497
-
498
- except queue.Empty:
499
- pass # Done.
500
-
501
- def is_same_host(self, url):
502
- """
503
- Check if the given ``url`` is a member of the same host as this
504
- connection pool.
505
- """
506
- if url.startswith("/"):
507
- return True
508
-
509
- # TODO: Add optional support for socket.gethostbyname checking.
510
- scheme, host, port = get_host(url)
511
- if host is not None:
512
- host = _normalize_host(host, scheme=scheme)
513
-
514
- # Use explicit default port for comparison when none is given
515
- if self.port and not port:
516
- port = port_by_scheme.get(scheme)
517
- elif not self.port and port == port_by_scheme.get(scheme):
518
- port = None
519
-
520
- return (scheme, host, port) == (self.scheme, self.host, self.port)
521
-
522
- def urlopen(
523
- self,
524
- method,
525
- url,
526
- body=None,
527
- headers=None,
528
- retries=None,
529
- redirect=True,
530
- assert_same_host=True,
531
- timeout=_Default,
532
- pool_timeout=None,
533
- release_conn=None,
534
- chunked=False,
535
- body_pos=None,
536
- **response_kw
537
- ):
538
- """
539
- Get a connection from the pool and perform an HTTP request. This is the
540
- lowest level call for making a request, so you'll need to specify all
541
- the raw details.
542
-
543
- .. note::
544
-
545
- More commonly, it's appropriate to use a convenience method provided
546
- by :class:`.RequestMethods`, such as :meth:`request`.
547
-
548
- .. note::
549
-
550
- `release_conn` will only behave as expected if
551
- `preload_content=False` because we want to make
552
- `preload_content=False` the default behaviour someday soon without
553
- breaking backwards compatibility.
554
-
555
- :param method:
556
- HTTP request method (such as GET, POST, PUT, etc.)
557
-
558
- :param url:
559
- The URL to perform the request on.
560
-
561
- :param body:
562
- Data to send in the request body, either :class:`str`, :class:`bytes`,
563
- an iterable of :class:`str`/:class:`bytes`, or a file-like object.
564
-
565
- :param headers:
566
- Dictionary of custom headers to send, such as User-Agent,
567
- If-None-Match, etc. If None, pool headers are used. If provided,
568
- these headers completely replace any pool-specific headers.
569
-
570
- :param retries:
571
- Configure the number of retries to allow before raising a
572
- :class:`~urllib3.exceptions.MaxRetryError` exception.
573
-
574
- Pass ``None`` to retry until you receive a response. Pass a
575
- :class:`~urllib3.util.retry.Retry` object for fine-grained control
576
- over different types of retries.
577
- Pass an integer number to retry connection errors that many times,
578
- but no other types of errors. Pass zero to never retry.
579
-
580
- If ``False``, then retries are disabled and any exception is raised
581
- immediately. Also, instead of raising a MaxRetryError on redirects,
582
- the redirect response will be returned.
583
-
584
- :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
585
-
586
- :param redirect:
587
- If True, automatically handle redirects (status codes 301, 302,
588
- 303, 307, 308). Each redirect counts as a retry. Disabling retries
589
- will disable redirect, too.
590
-
591
- :param assert_same_host:
592
- If ``True``, will make sure that the host of the pool requests is
593
- consistent else will raise HostChangedError. When ``False``, you can
594
- use the pool on an HTTP proxy and request foreign hosts.
595
-
596
- :param timeout:
597
- If specified, overrides the default timeout for this one
598
- request. It may be a float (in seconds) or an instance of
599
- :class:`urllib3.util.Timeout`.
600
-
601
- :param pool_timeout:
602
- If set and the pool is set to block=True, then this method will
603
- block for ``pool_timeout`` seconds and raise EmptyPoolError if no
604
- connection is available within the time period.
605
-
606
- :param release_conn:
607
- If False, then the urlopen call will not release the connection
608
- back into the pool once a response is received (but will release if
609
- you read the entire contents of the response such as when
610
- `preload_content=True`). This is useful if you're not preloading
611
- the response's content immediately. You will need to call
612
- ``r.release_conn()`` on the response ``r`` to return the connection
613
- back into the pool. If None, it takes the value of
614
- ``response_kw.get('preload_content', True)``.
615
-
616
- :param chunked:
617
- If True, urllib3 will send the body using chunked transfer
618
- encoding. Otherwise, urllib3 will send the body using the standard
619
- content-length form. Defaults to False.
620
-
621
- :param int body_pos:
622
- Position to seek to in file-like body in the event of a retry or
623
- redirect. Typically this won't need to be set because urllib3 will
624
- auto-populate the value when needed.
625
-
626
- :param \\**response_kw:
627
- Additional parameters are passed to
628
- :meth:`urllib3.response.HTTPResponse.from_httplib`
629
- """
630
-
631
- parsed_url = parse_url(url)
632
- destination_scheme = parsed_url.scheme
633
-
634
- if headers is None:
635
- headers = self.headers
636
-
637
- if not isinstance(retries, Retry):
638
- retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
639
-
640
- if release_conn is None:
641
- release_conn = response_kw.get("preload_content", True)
642
-
643
- # Check host
644
- if assert_same_host and not self.is_same_host(url):
645
- raise HostChangedError(self, url, retries)
646
-
647
- # Ensure that the URL we're connecting to is properly encoded
648
- if url.startswith("/"):
649
- url = six.ensure_str(_encode_target(url))
650
- else:
651
- url = six.ensure_str(parsed_url.url)
652
-
653
- conn = None
654
-
655
- # Track whether `conn` needs to be released before
656
- # returning/raising/recursing. Update this variable if necessary, and
657
- # leave `release_conn` constant throughout the function. That way, if
658
- # the function recurses, the original value of `release_conn` will be
659
- # passed down into the recursive call, and its value will be respected.
660
- #
661
- # See issue #651 [1] for details.
662
- #
663
- # [1] <https://github.com/urllib3/urllib3/issues/651>
664
- release_this_conn = release_conn
665
-
666
- http_tunnel_required = connection_requires_http_tunnel(
667
- self.proxy, self.proxy_config, destination_scheme
668
- )
669
-
670
- # Merge the proxy headers. Only done when not using HTTP CONNECT. We
671
- # have to copy the headers dict so we can safely change it without those
672
- # changes being reflected in anyone else's copy.
673
- if not http_tunnel_required:
674
- headers = headers.copy()
675
- headers.update(self.proxy_headers)
676
-
677
- # Must keep the exception bound to a separate variable or else Python 3
678
- # complains about UnboundLocalError.
679
- err = None
680
-
681
- # Keep track of whether we cleanly exited the except block. This
682
- # ensures we do proper cleanup in finally.
683
- clean_exit = False
684
-
685
- # Rewind body position, if needed. Record current position
686
- # for future rewinds in the event of a redirect/retry.
687
- body_pos = set_file_position(body, body_pos)
688
-
689
- try:
690
- # Request a connection from the queue.
691
- timeout_obj = self._get_timeout(timeout)
692
- conn = self._get_conn(timeout=pool_timeout)
693
-
694
- conn.timeout = timeout_obj.connect_timeout
695
-
696
- is_new_proxy_conn = self.proxy is not None and not getattr(
697
- conn, "sock", None
698
- )
699
- if is_new_proxy_conn and http_tunnel_required:
700
- self._prepare_proxy(conn)
701
-
702
- # Make the request on the httplib connection object.
703
- httplib_response = self._make_request(
704
- conn,
705
- method,
706
- url,
707
- timeout=timeout_obj,
708
- body=body,
709
- headers=headers,
710
- chunked=chunked,
711
- )
712
-
713
- # If we're going to release the connection in ``finally:``, then
714
- # the response doesn't need to know about the connection. Otherwise
715
- # it will also try to release it and we'll have a double-release
716
- # mess.
717
- response_conn = conn if not release_conn else None
718
-
719
- # Pass method to Response for length checking
720
- response_kw["request_method"] = method
721
-
722
- # Import httplib's response into our own wrapper object
723
- response = self.ResponseCls.from_httplib(
724
- httplib_response,
725
- pool=self,
726
- connection=response_conn,
727
- retries=retries,
728
- **response_kw
729
- )
730
-
731
- # Everything went great!
732
- clean_exit = True
733
-
734
- except EmptyPoolError:
735
- # Didn't get a connection from the pool, no need to clean up
736
- clean_exit = True
737
- release_this_conn = False
738
- raise
739
-
740
- except (
741
- TimeoutError,
742
- HTTPException,
743
- SocketError,
744
- ProtocolError,
745
- BaseSSLError,
746
- SSLError,
747
- CertificateError,
748
- ) as e:
749
- # Discard the connection for these exceptions. It will be
750
- # replaced during the next _get_conn() call.
751
- clean_exit = False
752
-
753
- def _is_ssl_error_message_from_http_proxy(ssl_error):
754
- # We're trying to detect the message 'WRONG_VERSION_NUMBER' but
755
- # SSLErrors are kinda all over the place when it comes to the message,
756
- # so we try to cover our bases here!
757
- message = " ".join(re.split("[^a-z]", str(ssl_error).lower()))
758
- return (
759
- "wrong version number" in message or "unknown protocol" in message
760
- )
761
-
762
- # Try to detect a common user error with proxies which is to
763
- # set an HTTP proxy to be HTTPS when it should be 'http://'
764
- # (ie {'http': 'http://proxy', 'https': 'https://proxy'})
765
- # Instead we add a nice error message and point to a URL.
766
- if (
767
- isinstance(e, BaseSSLError)
768
- and self.proxy
769
- and _is_ssl_error_message_from_http_proxy(e)
770
- and conn.proxy
771
- and conn.proxy.scheme == "https"
772
- ):
773
- e = ProxyError(
774
- "Your proxy appears to only use HTTP and not HTTPS, "
775
- "try changing your proxy URL to be HTTP. See: "
776
- "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
777
- "#https-proxy-error-http-proxy",
778
- SSLError(e),
779
- )
780
- elif isinstance(e, (BaseSSLError, CertificateError)):
781
- e = SSLError(e)
782
- elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
783
- e = ProxyError("Cannot connect to proxy.", e)
784
- elif isinstance(e, (SocketError, HTTPException)):
785
- e = ProtocolError("Connection aborted.", e)
786
-
787
- retries = retries.increment(
788
- method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
789
- )
790
- retries.sleep()
791
-
792
- # Keep track of the error for the retry warning.
793
- err = e
794
-
795
- finally:
796
- if not clean_exit:
797
- # We hit some kind of exception, handled or otherwise. We need
798
- # to throw the connection away unless explicitly told not to.
799
- # Close the connection, set the variable to None, and make sure
800
- # we put the None back in the pool to avoid leaking it.
801
- conn = conn and conn.close()
802
- release_this_conn = True
803
-
804
- if release_this_conn:
805
- # Put the connection back to be reused. If the connection is
806
- # expired then it will be None, which will get replaced with a
807
- # fresh connection during _get_conn.
808
- self._put_conn(conn)
809
-
810
- if not conn:
811
- # Try again
812
- log.warning(
813
- "Retrying (%r) after connection broken by '%r': %s", retries, err, url
814
- )
815
- return self.urlopen(
816
- method,
817
- url,
818
- body,
819
- headers,
820
- retries,
821
- redirect,
822
- assert_same_host,
823
- timeout=timeout,
824
- pool_timeout=pool_timeout,
825
- release_conn=release_conn,
826
- chunked=chunked,
827
- body_pos=body_pos,
828
- **response_kw
829
- )
830
-
831
- # Handle redirect?
832
- redirect_location = redirect and response.get_redirect_location()
833
- if redirect_location:
834
- if response.status == 303:
835
- method = "GET"
836
-
837
- try:
838
- retries = retries.increment(method, url, response=response, _pool=self)
839
- except MaxRetryError:
840
- if retries.raise_on_redirect:
841
- response.drain_conn()
842
- raise
843
- return response
844
-
845
- response.drain_conn()
846
- retries.sleep_for_retry(response)
847
- log.debug("Redirecting %s -> %s", url, redirect_location)
848
- return self.urlopen(
849
- method,
850
- redirect_location,
851
- body,
852
- headers,
853
- retries=retries,
854
- redirect=redirect,
855
- assert_same_host=assert_same_host,
856
- timeout=timeout,
857
- pool_timeout=pool_timeout,
858
- release_conn=release_conn,
859
- chunked=chunked,
860
- body_pos=body_pos,
861
- **response_kw
862
- )
863
-
864
- # Check if we should retry the HTTP response.
865
- has_retry_after = bool(response.headers.get("Retry-After"))
866
- if retries.is_retry(method, response.status, has_retry_after):
867
- try:
868
- retries = retries.increment(method, url, response=response, _pool=self)
869
- except MaxRetryError:
870
- if retries.raise_on_status:
871
- response.drain_conn()
872
- raise
873
- return response
874
-
875
- response.drain_conn()
876
- retries.sleep(response)
877
- log.debug("Retry: %s", url)
878
- return self.urlopen(
879
- method,
880
- url,
881
- body,
882
- headers,
883
- retries=retries,
884
- redirect=redirect,
885
- assert_same_host=assert_same_host,
886
- timeout=timeout,
887
- pool_timeout=pool_timeout,
888
- release_conn=release_conn,
889
- chunked=chunked,
890
- body_pos=body_pos,
891
- **response_kw
892
- )
893
-
894
- return response
895
-
896
-
897
- class HTTPSConnectionPool(HTTPConnectionPool):
898
- """
899
- Same as :class:`.HTTPConnectionPool`, but HTTPS.
900
-
901
- :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
902
- ``assert_hostname`` and ``host`` in this order to verify connections.
903
- If ``assert_hostname`` is False, no verification is done.
904
-
905
- The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
906
- ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
907
- is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
908
- the connection socket into an SSL socket.
909
- """
910
-
911
- scheme = "https"
912
- ConnectionCls = HTTPSConnection
913
-
914
- def __init__(
915
- self,
916
- host,
917
- port=None,
918
- strict=False,
919
- timeout=Timeout.DEFAULT_TIMEOUT,
920
- maxsize=1,
921
- block=False,
922
- headers=None,
923
- retries=None,
924
- _proxy=None,
925
- _proxy_headers=None,
926
- key_file=None,
927
- cert_file=None,
928
- cert_reqs=None,
929
- key_password=None,
930
- ca_certs=None,
931
- ssl_version=None,
932
- assert_hostname=None,
933
- assert_fingerprint=None,
934
- ca_cert_dir=None,
935
- **conn_kw
936
- ):
937
-
938
- HTTPConnectionPool.__init__(
939
- self,
940
- host,
941
- port,
942
- strict,
943
- timeout,
944
- maxsize,
945
- block,
946
- headers,
947
- retries,
948
- _proxy,
949
- _proxy_headers,
950
- **conn_kw
951
- )
952
-
953
- self.key_file = key_file
954
- self.cert_file = cert_file
955
- self.cert_reqs = cert_reqs
956
- self.key_password = key_password
957
- self.ca_certs = ca_certs
958
- self.ca_cert_dir = ca_cert_dir
959
- self.ssl_version = ssl_version
960
- self.assert_hostname = assert_hostname
961
- self.assert_fingerprint = assert_fingerprint
962
-
963
- def _prepare_conn(self, conn):
964
- """
965
- Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
966
- and establish the tunnel if proxy is used.
967
- """
968
-
969
- if isinstance(conn, VerifiedHTTPSConnection):
970
- conn.set_cert(
971
- key_file=self.key_file,
972
- key_password=self.key_password,
973
- cert_file=self.cert_file,
974
- cert_reqs=self.cert_reqs,
975
- ca_certs=self.ca_certs,
976
- ca_cert_dir=self.ca_cert_dir,
977
- assert_hostname=self.assert_hostname,
978
- assert_fingerprint=self.assert_fingerprint,
979
- )
980
- conn.ssl_version = self.ssl_version
981
- return conn
982
-
983
- def _prepare_proxy(self, conn):
984
- """
985
- Establishes a tunnel connection through HTTP CONNECT.
986
-
987
- Tunnel connection is established early because otherwise httplib would
988
- improperly set Host: header to proxy's IP:port.
989
- """
990
-
991
- conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
992
-
993
- if self.proxy.scheme == "https":
994
- conn.tls_in_tls_required = True
995
-
996
- conn.connect()
997
-
998
- def _new_conn(self):
999
- """
1000
- Return a fresh :class:`http.client.HTTPSConnection`.
1001
- """
1002
- self.num_connections += 1
1003
- log.debug(
1004
- "Starting new HTTPS connection (%d): %s:%s",
1005
- self.num_connections,
1006
- self.host,
1007
- self.port or "443",
1008
- )
1009
-
1010
- if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
1011
- raise SSLError(
1012
- "Can't connect to HTTPS URL because the SSL module is not available."
1013
- )
1014
-
1015
- actual_host = self.host
1016
- actual_port = self.port
1017
- if self.proxy is not None:
1018
- actual_host = self.proxy.host
1019
- actual_port = self.proxy.port
1020
-
1021
- conn = self.ConnectionCls(
1022
- host=actual_host,
1023
- port=actual_port,
1024
- timeout=self.timeout.connect_timeout,
1025
- strict=self.strict,
1026
- cert_file=self.cert_file,
1027
- key_file=self.key_file,
1028
- key_password=self.key_password,
1029
- **self.conn_kw
1030
- )
1031
-
1032
- return self._prepare_conn(conn)
1033
-
1034
- def _validate_conn(self, conn):
1035
- """
1036
- Called right before a request is made, after the socket is created.
1037
- """
1038
- super(HTTPSConnectionPool, self)._validate_conn(conn)
1039
-
1040
- # Force connect early to allow us to validate the connection.
1041
- if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
1042
- conn.connect()
1043
-
1044
- if not conn.is_verified:
1045
- warnings.warn(
1046
- (
1047
- "Unverified HTTPS request is being made to host '%s'. "
1048
- "Adding certificate verification is strongly advised. See: "
1049
- "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
1050
- "#ssl-warnings" % conn.host
1051
- ),
1052
- InsecureRequestWarning,
1053
- )
1054
-
1055
- if getattr(conn, "proxy_is_verified", None) is False:
1056
- warnings.warn(
1057
- (
1058
- "Unverified HTTPS connection done to an HTTPS proxy. "
1059
- "Adding certificate verification is strongly advised. See: "
1060
- "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
1061
- "#ssl-warnings"
1062
- ),
1063
- InsecureRequestWarning,
1064
- )
1065
-
1066
-
1067
- def connection_from_url(url, **kw):
1068
- """
1069
- Given a url, return an :class:`.ConnectionPool` instance of its host.
1070
-
1071
- This is a shortcut for not having to parse out the scheme, host, and port
1072
- of the url before creating an :class:`.ConnectionPool` instance.
1073
-
1074
- :param url:
1075
- Absolute URL string that must include the scheme. Port is optional.
1076
-
1077
- :param \\**kw:
1078
- Passes additional parameters to the constructor of the appropriate
1079
- :class:`.ConnectionPool`. Useful for specifying things like
1080
- timeout, maxsize, headers, etc.
1081
-
1082
- Example::
1083
-
1084
- >>> conn = connection_from_url('http://google.com/')
1085
- >>> r = conn.request('GET', '/')
1086
- """
1087
- scheme, host, port = get_host(url)
1088
- port = port or port_by_scheme.get(scheme, 80)
1089
- if scheme == "https":
1090
- return HTTPSConnectionPool(host, port=port, **kw)
1091
- else:
1092
- return HTTPConnectionPool(host, port=port, **kw)
1093
-
1094
-
1095
- def _normalize_host(host, scheme):
1096
- """
1097
- Normalize hosts for comparisons and use with sockets.
1098
- """
1099
-
1100
- host = normalize_host(host, scheme)
1101
-
1102
- # httplib doesn't like it when we include brackets in IPv6 addresses
1103
- # Specifically, if we include brackets but also pass the port then
1104
- # httplib crazily doubles up the square brackets on the Host header.
1105
- # Instead, we need to make sure we never pass ``None`` as the port.
1106
- # However, for backward compatibility reasons we can't actually
1107
- # *assert* that. See http://bugs.python.org/issue28539
1108
- if host.startswith("[") and host.endswith("]"):
1109
- host = host[1:-1]
1110
- return host
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AzumaSeren100/XuanShen-Bert-VITS2/modules.py DELETED
@@ -1,452 +0,0 @@
1
- import copy
2
- import math
3
- import numpy as np
4
- import scipy
5
- import torch
6
- from torch import nn
7
- from torch.nn import functional as F
8
-
9
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
10
- from torch.nn.utils import weight_norm, remove_weight_norm
11
-
12
- import commons
13
- from commons import init_weights, get_padding
14
- from transforms import piecewise_rational_quadratic_transform
15
- from attentions import Encoder
16
-
17
- LRELU_SLOPE = 0.1
18
-
19
- class LayerNorm(nn.Module):
20
- def __init__(self, channels, eps=1e-5):
21
- super().__init__()
22
- self.channels = channels
23
- self.eps = eps
24
-
25
- self.gamma = nn.Parameter(torch.ones(channels))
26
- self.beta = nn.Parameter(torch.zeros(channels))
27
-
28
- def forward(self, x):
29
- x = x.transpose(1, -1)
30
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
31
- return x.transpose(1, -1)
32
-
33
- class ConvReluNorm(nn.Module):
34
- def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
35
- super().__init__()
36
- self.in_channels = in_channels
37
- self.hidden_channels = hidden_channels
38
- self.out_channels = out_channels
39
- self.kernel_size = kernel_size
40
- self.n_layers = n_layers
41
- self.p_dropout = p_dropout
42
- assert n_layers > 1, "Number of layers should be larger than 0."
43
-
44
- self.conv_layers = nn.ModuleList()
45
- self.norm_layers = nn.ModuleList()
46
- self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2))
47
- self.norm_layers.append(LayerNorm(hidden_channels))
48
- self.relu_drop = nn.Sequential(
49
- nn.ReLU(),
50
- nn.Dropout(p_dropout))
51
- for _ in range(n_layers-1):
52
- self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2))
53
- self.norm_layers.append(LayerNorm(hidden_channels))
54
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
55
- self.proj.weight.data.zero_()
56
- self.proj.bias.data.zero_()
57
-
58
- def forward(self, x, x_mask):
59
- x_org = x
60
- for i in range(self.n_layers):
61
- x = self.conv_layers[i](x * x_mask)
62
- x = self.norm_layers[i](x)
63
- x = self.relu_drop(x)
64
- x = x_org + self.proj(x)
65
- return x * x_mask
66
-
67
-
68
- class DDSConv(nn.Module):
69
- """
70
- Dialted and Depth-Separable Convolution
71
- """
72
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
73
- super().__init__()
74
- self.channels = channels
75
- self.kernel_size = kernel_size
76
- self.n_layers = n_layers
77
- self.p_dropout = p_dropout
78
-
79
- self.drop = nn.Dropout(p_dropout)
80
- self.convs_sep = nn.ModuleList()
81
- self.convs_1x1 = nn.ModuleList()
82
- self.norms_1 = nn.ModuleList()
83
- self.norms_2 = nn.ModuleList()
84
- for i in range(n_layers):
85
- dilation = kernel_size ** i
86
- padding = (kernel_size * dilation - dilation) // 2
87
- self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
88
- groups=channels, dilation=dilation, padding=padding
89
- ))
90
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
91
- self.norms_1.append(LayerNorm(channels))
92
- self.norms_2.append(LayerNorm(channels))
93
-
94
- def forward(self, x, x_mask, g=None):
95
- if g is not None:
96
- x = x + g
97
- for i in range(self.n_layers):
98
- y = self.convs_sep[i](x * x_mask)
99
- y = self.norms_1[i](y)
100
- y = F.gelu(y)
101
- y = self.convs_1x1[i](y)
102
- y = self.norms_2[i](y)
103
- y = F.gelu(y)
104
- y = self.drop(y)
105
- x = x + y
106
- return x * x_mask
107
-
108
-
109
- class WN(torch.nn.Module):
110
- def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
111
- super(WN, self).__init__()
112
- assert(kernel_size % 2 == 1)
113
- self.hidden_channels =hidden_channels
114
- self.kernel_size = kernel_size,
115
- self.dilation_rate = dilation_rate
116
- self.n_layers = n_layers
117
- self.gin_channels = gin_channels
118
- self.p_dropout = p_dropout
119
-
120
- self.in_layers = torch.nn.ModuleList()
121
- self.res_skip_layers = torch.nn.ModuleList()
122
- self.drop = nn.Dropout(p_dropout)
123
-
124
- if gin_channels != 0:
125
- cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1)
126
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
127
-
128
- for i in range(n_layers):
129
- dilation = dilation_rate ** i
130
- padding = int((kernel_size * dilation - dilation) / 2)
131
- in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size,
132
- dilation=dilation, padding=padding)
133
- in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
134
- self.in_layers.append(in_layer)
135
-
136
- # last one is not necessary
137
- if i < n_layers - 1:
138
- res_skip_channels = 2 * hidden_channels
139
- else:
140
- res_skip_channels = hidden_channels
141
-
142
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
143
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
144
- self.res_skip_layers.append(res_skip_layer)
145
-
146
- def forward(self, x, x_mask, g=None, **kwargs):
147
- output = torch.zeros_like(x)
148
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
149
-
150
- if g is not None:
151
- g = self.cond_layer(g)
152
-
153
- for i in range(self.n_layers):
154
- x_in = self.in_layers[i](x)
155
- if g is not None:
156
- cond_offset = i * 2 * self.hidden_channels
157
- g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:]
158
- else:
159
- g_l = torch.zeros_like(x_in)
160
-
161
- acts = commons.fused_add_tanh_sigmoid_multiply(
162
- x_in,
163
- g_l,
164
- n_channels_tensor)
165
- acts = self.drop(acts)
166
-
167
- res_skip_acts = self.res_skip_layers[i](acts)
168
- if i < self.n_layers - 1:
169
- res_acts = res_skip_acts[:,:self.hidden_channels,:]
170
- x = (x + res_acts) * x_mask
171
- output = output + res_skip_acts[:,self.hidden_channels:,:]
172
- else:
173
- output = output + res_skip_acts
174
- return output * x_mask
175
-
176
- def remove_weight_norm(self):
177
- if self.gin_channels != 0:
178
- torch.nn.utils.remove_weight_norm(self.cond_layer)
179
- for l in self.in_layers:
180
- torch.nn.utils.remove_weight_norm(l)
181
- for l in self.res_skip_layers:
182
- torch.nn.utils.remove_weight_norm(l)
183
-
184
-
185
- class ResBlock1(torch.nn.Module):
186
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
187
- super(ResBlock1, self).__init__()
188
- self.convs1 = nn.ModuleList([
189
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
190
- padding=get_padding(kernel_size, dilation[0]))),
191
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
192
- padding=get_padding(kernel_size, dilation[1]))),
193
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
194
- padding=get_padding(kernel_size, dilation[2])))
195
- ])
196
- self.convs1.apply(init_weights)
197
-
198
- self.convs2 = nn.ModuleList([
199
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
200
- padding=get_padding(kernel_size, 1))),
201
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
202
- padding=get_padding(kernel_size, 1))),
203
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
204
- padding=get_padding(kernel_size, 1)))
205
- ])
206
- self.convs2.apply(init_weights)
207
-
208
- def forward(self, x, x_mask=None):
209
- for c1, c2 in zip(self.convs1, self.convs2):
210
- xt = F.leaky_relu(x, LRELU_SLOPE)
211
- if x_mask is not None:
212
- xt = xt * x_mask
213
- xt = c1(xt)
214
- xt = F.leaky_relu(xt, LRELU_SLOPE)
215
- if x_mask is not None:
216
- xt = xt * x_mask
217
- xt = c2(xt)
218
- x = xt + x
219
- if x_mask is not None:
220
- x = x * x_mask
221
- return x
222
-
223
- def remove_weight_norm(self):
224
- for l in self.convs1:
225
- remove_weight_norm(l)
226
- for l in self.convs2:
227
- remove_weight_norm(l)
228
-
229
-
230
- class ResBlock2(torch.nn.Module):
231
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
232
- super(ResBlock2, self).__init__()
233
- self.convs = nn.ModuleList([
234
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
235
- padding=get_padding(kernel_size, dilation[0]))),
236
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
237
- padding=get_padding(kernel_size, dilation[1])))
238
- ])
239
- self.convs.apply(init_weights)
240
-
241
- def forward(self, x, x_mask=None):
242
- for c in self.convs:
243
- xt = F.leaky_relu(x, LRELU_SLOPE)
244
- if x_mask is not None:
245
- xt = xt * x_mask
246
- xt = c(xt)
247
- x = xt + x
248
- if x_mask is not None:
249
- x = x * x_mask
250
- return x
251
-
252
- def remove_weight_norm(self):
253
- for l in self.convs:
254
- remove_weight_norm(l)
255
-
256
-
257
- class Log(nn.Module):
258
- def forward(self, x, x_mask, reverse=False, **kwargs):
259
- if not reverse:
260
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
261
- logdet = torch.sum(-y, [1, 2])
262
- return y, logdet
263
- else:
264
- x = torch.exp(x) * x_mask
265
- return x
266
-
267
-
268
- class Flip(nn.Module):
269
- def forward(self, x, *args, reverse=False, **kwargs):
270
- x = torch.flip(x, [1])
271
- if not reverse:
272
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
273
- return x, logdet
274
- else:
275
- return x
276
-
277
-
278
- class ElementwiseAffine(nn.Module):
279
- def __init__(self, channels):
280
- super().__init__()
281
- self.channels = channels
282
- self.m = nn.Parameter(torch.zeros(channels,1))
283
- self.logs = nn.Parameter(torch.zeros(channels,1))
284
-
285
- def forward(self, x, x_mask, reverse=False, **kwargs):
286
- if not reverse:
287
- y = self.m + torch.exp(self.logs) * x
288
- y = y * x_mask
289
- logdet = torch.sum(self.logs * x_mask, [1,2])
290
- return y, logdet
291
- else:
292
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
293
- return x
294
-
295
-
296
- class ResidualCouplingLayer(nn.Module):
297
- def __init__(self,
298
- channels,
299
- hidden_channels,
300
- kernel_size,
301
- dilation_rate,
302
- n_layers,
303
- p_dropout=0,
304
- gin_channels=0,
305
- mean_only=False):
306
- assert channels % 2 == 0, "channels should be divisible by 2"
307
- super().__init__()
308
- self.channels = channels
309
- self.hidden_channels = hidden_channels
310
- self.kernel_size = kernel_size
311
- self.dilation_rate = dilation_rate
312
- self.n_layers = n_layers
313
- self.half_channels = channels // 2
314
- self.mean_only = mean_only
315
-
316
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
317
- self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels)
318
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
319
- self.post.weight.data.zero_()
320
- self.post.bias.data.zero_()
321
-
322
- def forward(self, x, x_mask, g=None, reverse=False):
323
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
324
- h = self.pre(x0) * x_mask
325
- h = self.enc(h, x_mask, g=g)
326
- stats = self.post(h) * x_mask
327
- if not self.mean_only:
328
- m, logs = torch.split(stats, [self.half_channels]*2, 1)
329
- else:
330
- m = stats
331
- logs = torch.zeros_like(m)
332
-
333
- if not reverse:
334
- x1 = m + x1 * torch.exp(logs) * x_mask
335
- x = torch.cat([x0, x1], 1)
336
- logdet = torch.sum(logs, [1,2])
337
- return x, logdet
338
- else:
339
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
340
- x = torch.cat([x0, x1], 1)
341
- return x
342
-
343
-
344
- class ConvFlow(nn.Module):
345
- def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
346
- super().__init__()
347
- self.in_channels = in_channels
348
- self.filter_channels = filter_channels
349
- self.kernel_size = kernel_size
350
- self.n_layers = n_layers
351
- self.num_bins = num_bins
352
- self.tail_bound = tail_bound
353
- self.half_channels = in_channels // 2
354
-
355
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
356
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.)
357
- self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
358
- self.proj.weight.data.zero_()
359
- self.proj.bias.data.zero_()
360
-
361
- def forward(self, x, x_mask, g=None, reverse=False):
362
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
363
- h = self.pre(x0)
364
- h = self.convs(h, x_mask, g=g)
365
- h = self.proj(h) * x_mask
366
-
367
- b, c, t = x0.shape
368
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
369
-
370
- unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels)
371
- unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels)
372
- unnormalized_derivatives = h[..., 2 * self.num_bins:]
373
-
374
- x1, logabsdet = piecewise_rational_quadratic_transform(x1,
375
- unnormalized_widths,
376
- unnormalized_heights,
377
- unnormalized_derivatives,
378
- inverse=reverse,
379
- tails='linear',
380
- tail_bound=self.tail_bound
381
- )
382
-
383
- x = torch.cat([x0, x1], 1) * x_mask
384
- logdet = torch.sum(logabsdet * x_mask, [1,2])
385
- if not reverse:
386
- return x, logdet
387
- else:
388
- return x
389
- class TransformerCouplingLayer(nn.Module):
390
- def __init__(self,
391
- channels,
392
- hidden_channels,
393
- kernel_size,
394
- n_layers,
395
- n_heads,
396
- p_dropout=0,
397
- filter_channels=0,
398
- mean_only=False,
399
- wn_sharing_parameter=None,
400
- gin_channels = 0
401
- ):
402
- assert channels % 2 == 0, "channels should be divisible by 2"
403
- super().__init__()
404
- self.channels = channels
405
- self.hidden_channels = hidden_channels
406
- self.kernel_size = kernel_size
407
- self.n_layers = n_layers
408
- self.half_channels = channels // 2
409
- self.mean_only = mean_only
410
-
411
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
412
- self.enc = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = gin_channels) if wn_sharing_parameter is None else wn_sharing_parameter
413
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
414
- self.post.weight.data.zero_()
415
- self.post.bias.data.zero_()
416
-
417
- def forward(self, x, x_mask, g=None, reverse=False):
418
- x0, x1 = torch.split(x, [self.half_channels]*2, 1)
419
- h = self.pre(x0) * x_mask
420
- h = self.enc(h, x_mask, g=g)
421
- stats = self.post(h) * x_mask
422
- if not self.mean_only:
423
- m, logs = torch.split(stats, [self.half_channels]*2, 1)
424
- else:
425
- m = stats
426
- logs = torch.zeros_like(m)
427
-
428
- if not reverse:
429
- x1 = m + x1 * torch.exp(logs) * x_mask
430
- x = torch.cat([x0, x1], 1)
431
- logdet = torch.sum(logs, [1,2])
432
- return x, logdet
433
- else:
434
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
435
- x = torch.cat([x0, x1], 1)
436
- return x
437
-
438
- x1, logabsdet = piecewise_rational_quadratic_transform(x1,
439
- unnormalized_widths,
440
- unnormalized_heights,
441
- unnormalized_derivatives,
442
- inverse=reverse,
443
- tails='linear',
444
- tail_bound=self.tail_bound
445
- )
446
-
447
- x = torch.cat([x0, x1], 1) * x_mask
448
- logdet = torch.sum(logabsdet * x_mask, [1,2])
449
- if not reverse:
450
- return x, logdet
451
- else:
452
- return x
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BasToTheMax/TTS/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: TTS
3
- emoji: 🍕
4
- colorFrom: blue
5
- colorTo: red
6
- sdk: docker
7
- pinned: false
8
- license: other
9
- ---
10
- Hi
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Car Park.md DELETED
@@ -1,148 +0,0 @@
1
- <br />
2
- <h1>Aparcamiento: Consejos, trucos, tipos y beneficios</h1>
3
- <p>El aparcamiento es una habilidad esencial para cualquier conductor, pero también puede ser una fuente de frustración y estrés. Si usted está buscando un lugar en una ciudad llena de gente, tratando de adaptarse a su vehículo en un espacio reducido, o el uso de una nueva tecnología para aparcar su coche, es necesario saber algunos consejos y trucos para hacer su experiencia de estacionamiento más fácil y más seguro. En este artículo, exploraremos los diferentes tipos de sistemas de estacionamiento, sus ventajas y desventajas, y cómo usarlos de manera efectiva. También compartiremos algunos consejos útiles sobre cómo estacionar en un estacionamiento, ya sea perpendicular, en ángulo o paralelo. </p>
4
- <h2>Introducción</h2>
5
- <h3>¿Qué es el aparcamiento y por qué es importante? </h3>
6
- <p>El estacionamiento es el acto de colocar un vehículo en un área designada por un período de tiempo. El aparcamiento se puede hacer en la calle, en un garaje, en un lote o en una estructura. El aparcamiento es importante por varias razones:</p>
7
- <h2>car park</h2><br /><p><b><b>DOWNLOAD</b> &#10002; <a href="https://bltlly.com/2v6JuE">https://bltlly.com/2v6JuE</a></b></p><br /><br />
8
- <ul>
9
- <li>Ayuda a reducir la congestión del tráfico y la contaminación al minimizar el número de vehículos en la carretera. </li>
10
- <li>Proporciona comodidad y accesibilidad para los conductores y pasajeros que necesitan llegar a sus destinos. </li>
11
- <li>Garantiza la seguridad de los vehículos y sus propietarios al evitar robos, vandalismo o daños. </li>
12
- <li>Genera ingresos para empresas, municipios u operadores que cobran tarifas por servicios de estacionamiento. </li>
13
- </ul>
14
- <h3>¿Cuáles son los desafíos y soluciones comunes de estacionamiento de automóviles? </h3>
15
- <p>El aparcamiento también puede plantear algunos desafíos para los conductores, especialmente en las zonas urbanas donde el espacio es limitado y la demanda es alta. Algunos de los desafíos comunes de estacionamiento son:</p>
16
- <ul>
17
- <li>Escasez de plazas de aparcamiento: Puede que no haya suficientes plazas de aparcamiento disponibles para el número de vehículos que las necesitan. </li>
18
- <li>Tarifas de estacionamiento altas: El costo de estacionamiento puede ser demasiado caro para algunos conductores o disuadirlos de visitar ciertas áreas. </li>
19
-
20
- <li>Violaciones de estacionamiento: Algunos conductores pueden estacionar ilegal o incorrectamente, bloqueando otros vehículos o peatones. </li>
21
- </ul>
22
- <p>Afortunadamente, hay algunas soluciones que pueden ayudar a abordar estos desafíos, como:</p>
23
- <ul>
24
- <li>Gestión del estacionamiento: Esto implica la planificación, regulación y aplicación de políticas y prácticas de estacionamiento para optimizar el uso de los espacios y recursos existentes. </li>
25
- <li>Guía de estacionamiento: Esto implica proporcionar información e instrucciones a los conductores sobre la disponibilidad y ubicación de las plazas de estacionamiento. </li>
26
- <li>Tecnología de estacionamiento: Esto implica el uso de sistemas y dispositivos innovadores para automatizar, simplificar o mejorar el proceso de estacionamiento. </li>
27
- </ul>
28
- <h2>Consejos y trucos para aparcar coches</h2>
29
- <h3>Cómo estacionar en un estacionamiento</h3>
30
- <p>Estacionar en un estacionamiento puede ser complicado si no sabes cómo maniobrar tu vehículo correctamente. Aquí hay algunos consejos y trucos sobre cómo estacionar en un estacionamiento dependiendo del tipo de espacio:</p>
31
- <p></p>
32
- <h4>Estacionamiento perpendicular</h4>
33
- <p>Esto es cuando estaciona su vehículo en un ángulo recto a la acera o la pared. Para hacer esto:</p>
34
- <ol>
35
- <li>Coloque el parachoques de su coche con la primera línea de la plaza de aparcamiento. Mantenga su vehículo lo más lejos posible del lado opuesto para que tenga más espacio para girar. </li>
36
- <li>Levante los frenos y gire gradualmente el volante hacia la dirección del espacio. <li>Continúe girando hasta que su coche esté alineado con el espacio. Asegúrese de no golpear la acera o los otros vehículos. </li>
37
- <li>Enderezar las ruedas y avanzar lentamente hasta que el coche está completamente dentro del espacio. Deje suficiente espacio para que usted y los otros conductores abran las puertas. </li>
38
- <li>Aparca tu coche y apaga el motor. Has aparcado tu coche correctamente perpendicularmente. </li>
39
- </ol>
40
- <h4>Aparcamiento en ángulo</h4>
41
- <p>Esto es cuando estacionas tu vehículo en un ángulo de la acera o la pared. Para hacer esto:</p>
42
- <ol>
43
-
44
- <li>Levante los frenos y gire suavemente el volante hacia la dirección del espacio. </li>
45
- <li>Continúe girando hasta que su automóvil esté paralelo a las líneas del espacio. Asegúrese de no sobrepasar o subestimar el espacio. </li>
46
- <li>Enderezar las ruedas y avanzar lentamente hasta que el coche está completamente dentro del espacio. Deje suficiente espacio para que usted y los otros conductores abran las puertas. </li>
47
- <li>Ponga su coche en el parque y apague el motor. Usted ha aparcado con éxito su coche en un ángulo. </li>
48
- </ol>
49
- <h4>Estacionamiento paralelo</h4>
50
- <p>Esto es cuando estacionas tu vehículo paralelo a la acera o a la pared. Para hacer esto:</p>
51
- <ol>
52
- <li>Encuentre un espacio de estacionamiento que sea lo suficientemente grande para su automóvil. Idealmente, debe ser al menos una vez y media la longitud de su automóvil. </li>
53
- <li>Tire hacia arriba al lado del coche en frente del espacio. Alinee su parachoques trasero con su parachoques trasero y dejar unos dos pies de espacio entre sus coches. </li>
54
- <li>Ponga su coche en marcha atrás y lentamente hacia arriba. Gire el volante todo el camino a la derecha (o izquierda, dependiendo de qué lado está estacionando). </li>
55
- <li>Continúe retrocediendo hasta que su parachoques delantero esté más allá del parachoques trasero del automóvil frente a usted. Asegúrese de no golpear su automóvil o la acera. </li>
56
- <li> Gire rápidamente el volante todo el camino a la izquierda (o derecha, dependiendo de qué lado está estacionando). </li>
57
- <li>Continúe retrocediendo hasta que su automóvil esté paralelo a la acera o la pared. Asegúrese de no golpear el automóvil detrás de usted o ir demasiado atrás. </li>
58
- <li>Enderece sus ruedas y ajuste su posición si es necesario. Deje suficiente espacio para que usted y los otros conductores salgan e ingresen sus autos. </li>
59
- <li>Aparca tu coche y apaga el motor. Has aparcado tu coche de forma paralela. </li>
60
- </ol>
61
- <h3>Cómo usar sistemas de estacionamiento automático</h3>
62
-
63
- <h4>APS completamente automatizado</h4>
64
- <p>Este tipo de APS puede aparcar su coche sin ninguna entrada de usted. Todo lo que necesita hacer es conducir su coche en un área designada, salir y activar el sistema con una tarjeta, un código o una aplicación de teléfono inteligente. El sistema escaneará su automóvil, lo levantará y lo transportará a un espacio de estacionamiento disponible utilizando una cinta transportadora, un brazo robótico o un transbordador. Cuando desee recuperar su coche, solo tiene que activar el sistema de nuevo con el mismo método, y traerá su coche de vuelta a usted en unos minutos. </p>
65
- <h4>APS semiautomático</h4>
66
- <p>Este tipo de APS puede ayudarle a estacionar su automóvil proporcionando orientación, control o asistencia. Todavía tiene que conducir su coche en un área designada, pero luego puede elegir una de estas opciones:</p>
67
- <h5> Sistema de estacionamiento de rompecabezas de elevación y deslizamiento</h5>
68
- <p>Este sistema utiliza un elevador hidráulico y una plataforma deslizante para mover y almacenar vehículos en una disposición similar a la red. Puede estacionar su automóvil en una plataforma vacía, salir y activar el sistema con una tarjeta, un código o una aplicación de teléfono inteligente. El sistema luego levantará y deslizará su automóvil a un espacio de estacionamiento disponible. Cuando desee recuperar su coche, solo tiene que activar el sistema de nuevo con el mismo método, y se levantará y deslizar su coche de nuevo a usted. </p>
69
- <h5>Sistema de estacionamiento en boxes</h5>
70
- <p>Este sistema utiliza un elevador vertical y una plataforma giratoria horizontal para mover y almacenar vehículos en un pozo subterráneo. Puede estacionar su automóvil en un tocadiscos vacío, salir y activar el sistema con una tarjeta, un código o una aplicación de teléfono inteligente. El sistema entonces bajará su coche en el hoyo y girarlo a un espacio de estacionamiento disponible. Cuando desee recuperar su coche, solo tiene que activar el sistema de nuevo con el mismo método, y se levantará y girar el coche de nuevo a usted. </p>
71
- <h2>Tipos de estacionamiento y beneficios</h2>
72
- <h3>Ascensores de estacionamiento dependientes (apiladores de automóviles)</h3>
73
-
74
- <h3>Sistemas de estacionamiento semiautomáticos</h3>
75
- <p>Este tipo de sistema de estacionamiento utiliza una combinación de dispositivos mecánicos y electrónicos para mover y almacenar vehículos en una disposición horizontal o vertical. Puede aumentar la capacidad de estacionamiento de tres a seis veces, dependiendo del número de niveles y espacios. Es semi-independiente, lo que significa que algunos vehículos se puede acceder directamente, mientras que otros pueden requerir el movimiento de otros vehículos primero. Este tipo de sistema es adecuado para áreas de tráfico medio o estacionamiento a corto plazo. </p>
76
- <h4> Sistema de estacionamiento de rompecabezas de elevación y deslizamiento</h4>
77
- <p>Este es un tipo de sistema de estacionamiento semiautomático que ya hemos discutido en la sección anterior. Tiene los siguientes beneficios:</p>
78
- <ul>
79
- <li> Es flexible y adaptable, ya que puede adaptarse a diferentes tamaños y formas de vehículos y espacios. </li>
80
- <li> Es eficiente y rápido, ya que puede mover vehículos en pocos minutos. </li>
81
- <li> Es seguro, ya que evita el acceso no autorizado y daños a los vehículos. </li>
82
- </ul>
83
- <h4>Sistema de estacionamiento en boxes</h4>
84
- <p>Este es otro tipo de sistema de estacionamiento semiautomático que ya hemos discutido en la sección anterior. Tiene los siguientes beneficios:</p>
85
- <ul>
86
- <li> Ahorra espacio y es estético, ya que esconde los vehículos bajo tierra y preserva el paisaje. </li>
87
- <li> Es ecológico y ahorra energía, ya que reduce las emisiones y la contaminación acústica y utiliza menos electricidad. </li>
88
- <li> Es confiable y duradero, ya que tiene un bajo costo de mantenimiento y una larga vida útil. </li>
89
- </ul>
90
- <h3>Sistemas de estacionamiento totalmente automáticos</h3>
91
- <p>Este tipo de sistema de estacionamiento utiliza un proceso totalmente automatizado para mover y almacenar vehículos en una disposición horizontal o vertical. Puede aumentar la capacidad de estacionamiento de seis a diez veces, dependiendo del número de niveles y espacios. Es independiente, lo que significa que se puede acceder a cualquier vehículo en cualquier momento sin mover otros vehículos. Este tipo de sistema es adecuado para áreas de alto tráfico o estacionamiento premium. </p>
92
-
93
- <p>Este tipo de sistema de estacionamiento totalmente automático utiliza una plataforma giratoria para mover y almacenar vehículos en una disposición circular. Tiene los siguientes beneficios:</p>
94
- <ul>
95
- <li> Es simple y conveniente, ya que requiere un espacio y operación mínimos. </li>
96
- <li> Es económico y asequible, ya que tiene un bajo costo de instalación y un alto retorno de la inversión. </li>
97
- <li> Es divertido y atractivo, ya que añade una característica única al edificio o al área. </li>
98
- </ul>
99
- <h4>Sistema de estacionamiento de la torre</h4>
100
- <p>Este tipo de sistema de estacionamiento totalmente automático utiliza un ascensor vertical para mover y almacenar vehículos en una estructura similar a una torre. Tiene los siguientes beneficios:</p>
101
- <ul>
102
- <li>Es innovador y avanzado, ya que utiliza tecnología y diseño de vanguardia. </li>
103
- <li> Es espacioso y cómodo, ya que proporciona un amplio espacio para cada vehículo y elimina el error humano. </li>
104
- <li> Es inteligente e inteligente, ya que puede monitorear y controlar el estado y el rendimiento del estacionamiento. </li>
105
- </ul>
106
- <h2>Conclusión</h2>
107
- <h3>Resumen de los puntos principales</h3>
108
- <p>En conclusión, el estacionamiento es una habilidad importante que todo conductor debe dominar. Puede ayudarle a ahorrar tiempo, dinero y energía, así como evitar accidentes, multas o estrés. En este artículo, hemos cubierto algunos consejos y trucos sobre cómo estacionar en un estacionamiento, ya sea perpendicular, en ángulo o paralelo. También hemos introducido algunos tipos de sistemas de aparcamiento, sus ventajas y desventajas, y cómo utilizarlos de manera eficaz. Esperamos que este artículo haya sido informativo y útil para usted. </p>
109
- <h3>Llamada a la acción</h3>
110
- <p>Si desea obtener más información sobre el estacionamiento de automóviles o encontrar las mejores soluciones de estacionamiento para sus necesidades, visite nuestro sitio web o contáctenos hoy. Somos expertos en sistemas de aparcamiento y podemos ofrecerle asesoramiento profesional, instalación, mantenimiento y soporte. ¡Esperamos saber de usted pronto! </p>
111
- <h2>Preguntas frecuentes</h2>
112
- <ol>
113
-
114
- <p>Los sistemas de estacionamiento automático pueden ofrecer muchos beneficios para los conductores, como:</p>
115
- <ul>
116
- <li>Pueden ahorrar espacio, tiempo y energía optimizando el uso de las áreas de estacionamiento existentes y reduciendo la necesidad de intervención humana. </li>
117
- <li>Pueden mejorar la seguridad y la protección al prevenir accidentes, robos, vandalismo o daños a vehículos. </li>
118
- <li>Pueden mejorar la comodidad y el confort al simplificar el proceso de estacionamiento y eliminar la molestia de encontrar un lugar de estacionamiento. </li>
119
- </ul>
120
- <li>¿Cuáles son las desventajas de usar sistemas de estacionamiento automático? </li>
121
- <p>Los sistemas de estacionamiento automático también pueden tener algunos inconvenientes, como:</p>
122
- <ul>
123
- <li> Pueden ser costosos y complejos de instalar, operar y mantener. </li>
124
- <li>Pueden ser propensos a fallas técnicas o mal funcionamiento que pueden afectar el rendimiento o la disponibilidad del estacionamiento. </li>
125
- <li>Pueden ser incompatibles o inaccesibles para algunos vehículos o conductores que no tienen la tecnología o el equipo requerido. </li>
126
- </ul>
127
- <li>¿Cómo puedo mejorar mis habilidades de estacionamiento? </li>
128
- <p>Las habilidades de estacionamiento de automóviles se pueden mejorar practicando regularmente y siguiendo algunos consejos, como:</p>
129
- <ul>
130
- <li>Ajuste los espejos y el asiento para tener una visión clara de su entorno. </li>
131
- <li>Utilice sus indicadores y compruebe sus puntos ciegos antes de girar o cambiar de carril. </li>
132
- <li>Estacione en un área bien iluminada y segura que sea adecuada para el tamaño y tipo de su vehículo. </li>
133
- <li>Utilice puntos de referencia y directrices para alinear su vehículo con el espacio de estacionamiento. </li>
134
- <li>Deja suficiente espacio entre tu coche y los otros vehículos u obstáculos. </li>
135
- </ul>
136
- <li>¿Cuáles son algunas reglas de etiqueta de estacionamiento que debo seguir? </li>
137
- <p>Las reglas de etiqueta de estacionamiento son algunas normas no escritas que pueden ayudarlo a respetar a otros conductores y evitar conflictos, como:</p>
138
- <ul>
139
- <li>Estacione dentro de las líneas del espacio de estacionamiento y no ocupe más de un espacio. </li>
140
- <li>No estacione en espacios reservados, deshabilitados o de emergencia a menos que esté autorizado a hacerlo. </li>
141
-
142
- <li>No tocar la bocina, rev, o reproducir música fuerte mientras se estaciona o espera un espacio. </li>
143
- <li>No deje objetos de valor o basura en su coche o en el suelo. </li>
144
- </ul>
145
- <li>¿Dónde puedo encontrar más información sobre el aparcamiento? </li>
146
- <p>Puede encontrar más información sobre el estacionamiento de coches visitando nuestro sitio web o contactándonos hoy. Tenemos una gran cantidad de recursos y expertos que pueden ayudarle con cualquier pregunta o necesidades de estacionamiento de automóviles. También podemos proporcionarle las mejores soluciones de estacionamiento para su situación específica. ¡Estamos encantados de ayudarle de cualquier manera que podamos! </p> 64aa2da5cf<br />
147
- <br />
148
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Destino Final Mod Apk.md DELETED
@@ -1,58 +0,0 @@
1
-
2
- <h1>Descargar Final Destiny Mod APK: Un juego de aventura de fantasía</h1>
3
- <p>Si estás buscando un emocionante e inmersivo juego de aventura de fantasía, deberías probar Final Destiny. Este juego te llevará a un mundo donde tú y una guerrera tienen que luchar contra varios enemigos y peligros después de rescatar a un bebé. También podrá disfrutar de los impresionantes gráficos, los controles suaves y la banda sonora épica de este juego. ¿Pero qué pasa si quieres divertirte más y ser más cómodo mientras juegas a Final Destiny? Bueno, se puede descargar Final Destiny Mod APK y obtener acceso a dinero ilimitado, modo dios, y no hay anuncios. En este artículo, te diremos todo lo que necesitas saber sobre Final Destiny y su versión mod apk. </p>
4
- <h2>¿Qué es el destino final? </h2>
5
- <p>Final Destiny es un juego de acción y aventura desarrollado por YEMA y lanzado en 2020. Tiene más de 1 millón de descargas en Google Play Store y una calificación de 4.4 de 5 estrellas. El juego es compatible con dispositivos Android con la versión 4.4 o superior. </p>
6
- <h2>descargar destino final mod apk</h2><br /><p><b><b>Download Zip</b> &#9999; <a href="https://bltlly.com/2v6MfK">https://bltlly.com/2v6MfK</a></b></p><br /><br />
7
- <h3>La historia del destino final</h3>
8
- <p>El juego comienza con un evento misterioso que causa una gran explosión en el cielo. Eres una guerrera que está cerca de la escena y es testigo del desastre. También encuentras una niña que milagrosamente sobrevive a la explosión. Decides llevarla contigo y protegerla de los peligros que acechan en este mundo caótico. En el camino, te encontrarás con muchos enemigos, como monstruos, robots, zombies y alienígenas. También descubrirá los secretos detrás de la explosión y el origen de la niña. </p>
9
- <h3>El juego de Final Destiny</h3>
10
-
11
- <h3>Las características de Final Destiny</h3>
12
- <p>Algunas de las características que hacen de Final Destiny un juego increíble son:</p>
13
- <ul>
14
- <li>Impresionantes gráficos y animaciones que crean un mundo de fantasía realista e inmersivo. </li>
15
- <li>Controles suaves y física que le permiten realizar varias acciones y movimientos. </li>
16
- <li>Banda sonora épica y efectos de sonido que mejoran la atmósfera y el estado de ánimo del juego. </li>
17
- <li>Diversos enemigos y jefes que desafían tus habilidades y estrategias. </li>
18
- <li>Múltiples armas y habilidades que se adaptan a sus preferencias y estilo de juego. </li>
19
- <li>Varios trajes y accesorios que te permiten personalizar el aspecto de tu personaje. </li>
20
- <li>Interfaz de usuario simple e intuitiva que hace que el juego sea fácil de navegar y jugar. </li>
21
- </ul>
22
- <h2>¿Por qué descargar Final Destiny Mod APK? </h2>
23
- <p>Aunque Final Destiny es un juego divertido y emocionante, también tiene algunas limitaciones y desventajas que pueden afectar tu experiencia de juego. Por ejemplo, puedes quedarte sin dinero o gemas para mejorar tus armas o habilidades. También puedes encontrar algunas etapas o niveles demasiado difíciles o frustrantes para completarlos. También puedes enojarte con los anuncios que aparecen de vez en cuando. Es por eso que es posible que desee descargar Final Destiny Mod APK en lugar de la versión original. Esta versión apk mod le dará algunas ventajas y beneficios que harán que su experiencia de juego más agradable y conveniente. </p>
24
- <h3>Dinero ilimitado</h3>
25
- <p>Con Final Destiny Mod APK, usted tendrá dinero ilimitado en su cuenta. Esto significa que puede comprar cualquier arma o habilidad que desee sin preocuparse por el costo o la disponibilidad. También puede actualizar sus armas y habilidades al máximo nivel sin ningún tipo de molestia. Esto hará que tu personaje sea más poderoso y capaz de derrotar a cualquier enemigo o jefe. </p>
26
- <h3>Modo de Dios</h3>
27
-
28
- <h3>No hay anuncios</h3>
29
- <p>Con Final Destiny Mod APK, también se deshará de los molestos anuncios que interrumpen su juego. No verás ningún banner, pop-ups o videos que intenten venderte algo o hacerte ver algo. Tampoco tendrá que esperar a que ningún temporizador o cuenta atrás para reanudar su juego. Esto hará que tu juego sea más ininterrumpido y agradable, ya que puedes jugar sin distracciones ni retrasos. </p>
30
- <h2>¿Cómo descargar e instalar Final Destiny Mod APK? </h2>
31
- <p>Si está interesado en descargar e instalar Final Destiny Mod APK, puede seguir estos sencillos pasos:</p>
32
- <h3>Paso 1: Descargar el archivo apk mod de una fuente de confianza</h3>
33
- <p>Lo primero que tienes que hacer es encontrar un sitio web confiable y seguro que ofrece el archivo apk mod de Final Destiny. Puede buscarlo en Google o utilizar el enlace que proporcionamos a continuación. Asegúrese de que el sitio web sea seguro y tenga comentarios positivos de otros usuarios. Evite descargar de fuentes desconocidas o sospechosas que puedan contener virus o malware. </p>
34
- <p>Una vez que encuentre el sitio web, haga clic en el botón de descarga y espere a que el archivo se descargue en su dispositivo. El tamaño del archivo es de unos 100 MB, así que asegúrese de tener suficiente espacio de almacenamiento y una conexión a Internet estable. </p>
35
- <p></p>
36
- <h3>Paso 2: Habilitar fuentes desconocidas en el dispositivo</h3>
37
- <p>Lo siguiente que debe hacer es habilitar fuentes desconocidas en su dispositivo. Esta es una configuración de seguridad que le permite instalar aplicaciones desde fuentes distintas de Google Play Store. Para hacer esto, vaya a la configuración del dispositivo y busque la opción de seguridad o privacidad. Luego, busque la opción de fuentes desconocidas y conéctela. Puede ver un mensaje de advertencia que le informa sobre los riesgos de instalar aplicaciones desde fuentes desconocidas. Simplemente ignórelo y confirme su elección. </p>
38
- <h3>Paso 3: Instalar el archivo apk mod y disfrutar del juego</h3>
39
-
40
- <p>Una vez realizada la instalación, puedes abrir el juego y empezar a jugar con dinero ilimitado, modo dios y sin anuncios. ¡Diviértete! </p>
41
- <h2>Conclusión</h2>
42
- <p>Final Destiny es un juego de aventura de fantasía que te llevará a un mundo donde tendrás que luchar contra varios enemigos y peligros después de rescatar a una niña. Usted podrá disfrutar de los gráficos impresionantes, los controles suaves, y la banda sonora épica de este juego. Pero si quieres tener más diversión y comodidad mientras juegas Final Destiny, puedes descargar Final Destiny Mod APK y obtener acceso a dinero ilimitado, modo dios, y sin anuncios. Esta versión apk mod hará que su experiencia de juego más agradable y conveniente. </p>
43
- <p>Esperamos que este artículo te haya ayudado a aprender todo lo que necesitas saber sobre Final Destiny y su versión mod apk. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación. ¡Gracias por leer! </p>
44
- <h2>Preguntas frecuentes</h2>
45
- <ul>
46
- <li><b> ¿Es seguro descargar e instalar Final Destiny Mod APK? </b></li>
47
- <p>Sí, Final Destiny Mod APK es seguro para descargar e instalar siempre y cuando lo obtenga de una fuente de confianza. Lo hemos probado nosotros mismos y no encontramos virus o malware en él. Sin embargo, todavía recomendamos que lo escanee con una aplicación antivirus antes de instalarlo. </p>
48
- <li><b>¿Es Final Destiny Mod APK compatible con mi dispositivo? </b></li>
49
- <p>Final Destiny Mod APK es compatible con dispositivos Android con la versión 4.4 o superior. Sin embargo, es posible que algunos dispositivos no admitan algunas características o funciones del juego debido a limitaciones de hardware o problemas de software. </p>
50
- <li><b>¿Puedo jugar Final Destiny Mod APK en línea con otros jugadores? </b></li>
51
- <p>No, Final Destiny Mod APK no es un juego en línea. Es un juego para un solo jugador que se puede jugar sin conexión a Internet. También puedes reproducirlo en un emulador en tu PC o portátil. </p>
52
- <li><b>¿Puedo actualizar Final Destiny Mod APK a la última versión? </b></li>
53
-
54
- <li><b>¿Puedo usar Final Destiny Mod APK con la versión original del juego? </b></li>
55
- <p>No, no se puede utilizar Final Destiny Mod APK con la versión original del juego. Usted tiene que desinstalar la versión original del juego antes de instalar el archivo apk mod. De lo contrario, puede encontrar errores o conflictos que pueden impedir que el juego se ejecute correctamente. </p>
56
- </ul></p> 64aa2da5cf<br />
57
- <br />
58
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/misc/coord.py DELETED
@@ -1,31 +0,0 @@
1
- import torch
2
-
3
- class CoordStage(object):
4
- def __init__(self, n_embed, down_factor):
5
- self.n_embed = n_embed
6
- self.down_factor = down_factor
7
-
8
- def eval(self):
9
- return self
10
-
11
- def encode(self, c):
12
- """fake vqmodel interface"""
13
- assert 0.0 <= c.min() and c.max() <= 1.0
14
- b,ch,h,w = c.shape
15
- assert ch == 1
16
-
17
- c = torch.nn.functional.interpolate(c, scale_factor=1/self.down_factor,
18
- mode="area")
19
- c = c.clamp(0.0, 1.0)
20
- c = self.n_embed*c
21
- c_quant = c.round()
22
- c_ind = c_quant.to(dtype=torch.long)
23
-
24
- info = None, None, c_ind
25
- return c_quant, None, info
26
-
27
- def decode(self, c):
28
- c = c/self.n_embed
29
- c = torch.nn.functional.interpolate(c, scale_factor=self.down_factor,
30
- mode="nearest")
31
- return c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BetterAPI/BetterChat/src/styles/main.css DELETED
@@ -1,17 +0,0 @@
1
- @import "./highlight-js.css";
2
-
3
- @tailwind base;
4
- @tailwind components;
5
- @tailwind utilities;
6
-
7
- @layer components {
8
- .btn {
9
- @apply inline-flex flex-shrink-0 cursor-pointer select-none items-center justify-center whitespace-nowrap outline-none transition-all focus:ring disabled:cursor-default;
10
- }
11
- }
12
-
13
- @layer utilities {
14
- .scrollbar-custom {
15
- @apply scrollbar-thin scrollbar-track-transparent scrollbar-thumb-black/10 scrollbar-thumb-rounded-full scrollbar-w-1 hover:scrollbar-thumb-black/20 dark:scrollbar-thumb-white/10 dark:hover:scrollbar-thumb-white/20;
16
- }
17
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/detail/complex/cpow.h DELETED
@@ -1,55 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- * Copyright 2013 Filipe RNC Maia
4
- *
5
- * Licensed under the Apache License, Version 2.0 (the "License");
6
- * you may not use this file except in compliance with the License.
7
- * You may obtain a copy of the License at
8
- *
9
- * http://www.apache.org/licenses/LICENSE-2.0
10
- *
11
- * Unless required by applicable law or agreed to in writing, software
12
- * distributed under the License is distributed on an "AS IS" BASIS,
13
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- * See the License for the specific language governing permissions and
15
- * limitations under the License.
16
- */
17
-
18
- #pragma once
19
-
20
- #include <thrust/complex.h>
21
- #include <thrust/detail/type_traits.h>
22
-
23
- namespace thrust {
24
-
25
- template <typename T0, typename T1>
26
- __host__ __device__
27
- complex<typename detail::promoted_numerical_type<T0, T1>::type>
28
- pow(const complex<T0>& x, const complex<T1>& y)
29
- {
30
- typedef typename detail::promoted_numerical_type<T0, T1>::type T;
31
- return exp(log(complex<T>(x)) * complex<T>(y));
32
- }
33
-
34
- template <typename T0, typename T1>
35
- __host__ __device__
36
- complex<typename detail::promoted_numerical_type<T0, T1>::type>
37
- pow(const complex<T0>& x, const T1& y)
38
- {
39
- typedef typename detail::promoted_numerical_type<T0, T1>::type T;
40
- return exp(log(complex<T>(x)) * T(y));
41
- }
42
-
43
- template <typename T0, typename T1>
44
- __host__ __device__
45
- complex<typename detail::promoted_numerical_type<T0, T1>::type>
46
- pow(const T0& x, const complex<T1>& y)
47
- {
48
- typedef typename detail::promoted_numerical_type<T0, T1>::type T;
49
- // Find `log` by ADL.
50
- using std::log;
51
- return exp(log(T(x)) * complex<T>(y));
52
- }
53
-
54
- } // end namespace thrust
55
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/detail/dependencies_aware_execution_policy.h DELETED
@@ -1,105 +0,0 @@
1
- /*
2
- * Copyright 2018 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/detail/cpp11_required.h>
21
-
22
- #if THRUST_CPP_DIALECT >= 2011
23
-
24
- #include <tuple>
25
-
26
- #include <thrust/detail/execute_with_dependencies.h>
27
-
28
- namespace thrust
29
- {
30
- namespace detail
31
- {
32
-
33
- template<template<typename> class ExecutionPolicyCRTPBase>
34
- struct dependencies_aware_execution_policy
35
- {
36
- template<typename ...Dependencies>
37
- __host__
38
- thrust::detail::execute_with_dependencies<
39
- ExecutionPolicyCRTPBase,
40
- Dependencies...
41
- >
42
- after(Dependencies&& ...dependencies) const
43
- {
44
- return { capture_as_dependency(THRUST_FWD(dependencies))... };
45
- }
46
-
47
- template<typename ...Dependencies>
48
- __host__
49
- thrust::detail::execute_with_dependencies<
50
- ExecutionPolicyCRTPBase,
51
- Dependencies...
52
- >
53
- after(std::tuple<Dependencies...>& dependencies) const
54
- {
55
- return { capture_as_dependency(dependencies) };
56
- }
57
- template<typename ...Dependencies>
58
- __host__
59
- thrust::detail::execute_with_dependencies<
60
- ExecutionPolicyCRTPBase,
61
- Dependencies...
62
- >
63
- after(std::tuple<Dependencies...>&& dependencies) const
64
- {
65
- return { capture_as_dependency(std::move(dependencies)) };
66
- }
67
-
68
- template<typename ...Dependencies>
69
- __host__
70
- thrust::detail::execute_with_dependencies<
71
- ExecutionPolicyCRTPBase,
72
- Dependencies...
73
- >
74
- rebind_after(Dependencies&& ...dependencies) const
75
- {
76
- return { capture_as_dependency(THRUST_FWD(dependencies))... };
77
- }
78
-
79
- template<typename ...Dependencies>
80
- __host__
81
- thrust::detail::execute_with_dependencies<
82
- ExecutionPolicyCRTPBase,
83
- Dependencies...
84
- >
85
- rebind_after(std::tuple<Dependencies...>& dependencies) const
86
- {
87
- return { capture_as_dependency(dependencies) };
88
- }
89
- template<typename ...Dependencies>
90
- __host__
91
- thrust::detail::execute_with_dependencies<
92
- ExecutionPolicyCRTPBase,
93
- Dependencies...
94
- >
95
- rebind_after(std::tuple<Dependencies...>&& dependencies) const
96
- {
97
- return { capture_as_dependency(std::move(dependencies)) };
98
- }
99
- };
100
-
101
- } // end detail
102
- } // end thrust
103
-
104
- #endif // THRUST_CPP_DIALECT >= 2011
105
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/unique.h DELETED
@@ -1,23 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system inherits unique
22
- #include <thrust/system/detail/sequential/unique.h>
23
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/uninitialized_copy.h DELETED
@@ -1,44 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a fill of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // the purpose of this header is to #include the uninitialized_copy.h header
22
- // of the sequential, host, and device systems. It should be #included in any
23
- // code which uses adl to dispatch uninitialized_copy
24
-
25
- #include <thrust/system/detail/sequential/uninitialized_copy.h>
26
-
27
- // SCons can't see through the #defines below to figure out what this header
28
- // includes, so we fake it out by specifying all possible files we might end up
29
- // including inside an #if 0.
30
- #if 0
31
- #include <thrust/system/cpp/detail/uninitialized_copy.h>
32
- #include <thrust/system/cuda/detail/uninitialized_copy.h>
33
- #include <thrust/system/omp/detail/uninitialized_copy.h>
34
- #include <thrust/system/tbb/detail/uninitialized_copy.h>
35
- #endif
36
-
37
- #define __THRUST_HOST_SYSTEM_UNINITIALIZED_COPY_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/uninitialized_copy.h>
38
- #include __THRUST_HOST_SYSTEM_UNINITIALIZED_COPY_HEADER
39
- #undef __THRUST_HOST_SYSTEM_UNINITIALIZED_COPY_HEADER
40
-
41
- #define __THRUST_DEVICE_SYSTEM_UNINITIALIZED_COPY_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/uninitialized_copy.h>
42
- #include __THRUST_DEVICE_SYSTEM_UNINITIALIZED_COPY_HEADER
43
- #undef __THRUST_DEVICE_SYSTEM_UNINITIALIZED_COPY_HEADER
44
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/regionclip-demo/detectron2/evaluation/pascal_voc_evaluation.py DELETED
@@ -1,300 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) Facebook, Inc. and its affiliates.
3
-
4
- import logging
5
- import numpy as np
6
- import os
7
- import tempfile
8
- import xml.etree.ElementTree as ET
9
- from collections import OrderedDict, defaultdict
10
- from functools import lru_cache
11
- import torch
12
-
13
- from detectron2.data import MetadataCatalog
14
- from detectron2.utils import comm
15
- from detectron2.utils.file_io import PathManager
16
-
17
- from .evaluator import DatasetEvaluator
18
-
19
-
20
- class PascalVOCDetectionEvaluator(DatasetEvaluator):
21
- """
22
- Evaluate Pascal VOC style AP for Pascal VOC dataset.
23
- It contains a synchronization, therefore has to be called from all ranks.
24
-
25
- Note that the concept of AP can be implemented in different ways and may not
26
- produce identical results. This class mimics the implementation of the official
27
- Pascal VOC Matlab API, and should produce similar but not identical results to the
28
- official API.
29
- """
30
-
31
- def __init__(self, dataset_name):
32
- """
33
- Args:
34
- dataset_name (str): name of the dataset, e.g., "voc_2007_test"
35
- """
36
- self._dataset_name = dataset_name
37
- meta = MetadataCatalog.get(dataset_name)
38
-
39
- # Too many tiny files, download all to local for speed.
40
- annotation_dir_local = PathManager.get_local_path(
41
- os.path.join(meta.dirname, "Annotations/")
42
- )
43
- self._anno_file_template = os.path.join(annotation_dir_local, "{}.xml")
44
- self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt")
45
- self._class_names = meta.thing_classes
46
- assert meta.year in [2007, 2012], meta.year
47
- self._is_2007 = meta.year == 2007
48
- self._cpu_device = torch.device("cpu")
49
- self._logger = logging.getLogger(__name__)
50
-
51
- def reset(self):
52
- self._predictions = defaultdict(list) # class name -> list of prediction strings
53
-
54
- def process(self, inputs, outputs):
55
- for input, output in zip(inputs, outputs):
56
- image_id = input["image_id"]
57
- instances = output["instances"].to(self._cpu_device)
58
- boxes = instances.pred_boxes.tensor.numpy()
59
- scores = instances.scores.tolist()
60
- classes = instances.pred_classes.tolist()
61
- for box, score, cls in zip(boxes, scores, classes):
62
- xmin, ymin, xmax, ymax = box
63
- # The inverse of data loading logic in `datasets/pascal_voc.py`
64
- xmin += 1
65
- ymin += 1
66
- self._predictions[cls].append(
67
- f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}"
68
- )
69
-
70
- def evaluate(self):
71
- """
72
- Returns:
73
- dict: has a key "segm", whose value is a dict of "AP", "AP50", and "AP75".
74
- """
75
- all_predictions = comm.gather(self._predictions, dst=0)
76
- if not comm.is_main_process():
77
- return
78
- predictions = defaultdict(list)
79
- for predictions_per_rank in all_predictions:
80
- for clsid, lines in predictions_per_rank.items():
81
- predictions[clsid].extend(lines)
82
- del all_predictions
83
-
84
- self._logger.info(
85
- "Evaluating {} using {} metric. "
86
- "Note that results do not use the official Matlab API.".format(
87
- self._dataset_name, 2007 if self._is_2007 else 2012
88
- )
89
- )
90
-
91
- with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname:
92
- res_file_template = os.path.join(dirname, "{}.txt")
93
-
94
- aps = defaultdict(list) # iou -> ap per class
95
- for cls_id, cls_name in enumerate(self._class_names):
96
- lines = predictions.get(cls_id, [""])
97
-
98
- with open(res_file_template.format(cls_name), "w") as f:
99
- f.write("\n".join(lines))
100
-
101
- for thresh in range(50, 100, 5):
102
- rec, prec, ap = voc_eval(
103
- res_file_template,
104
- self._anno_file_template,
105
- self._image_set_path,
106
- cls_name,
107
- ovthresh=thresh / 100.0,
108
- use_07_metric=self._is_2007,
109
- )
110
- aps[thresh].append(ap * 100)
111
-
112
- ret = OrderedDict()
113
- mAP = {iou: np.mean(x) for iou, x in aps.items()}
114
- ret["bbox"] = {"AP": np.mean(list(mAP.values())), "AP50": mAP[50], "AP75": mAP[75]}
115
- return ret
116
-
117
-
118
- ##############################################################################
119
- #
120
- # Below code is modified from
121
- # https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
122
- # --------------------------------------------------------
123
- # Fast/er R-CNN
124
- # Licensed under The MIT License [see LICENSE for details]
125
- # Written by Bharath Hariharan
126
- # --------------------------------------------------------
127
-
128
- """Python implementation of the PASCAL VOC devkit's AP evaluation code."""
129
-
130
-
131
- @lru_cache(maxsize=None)
132
- def parse_rec(filename):
133
- """Parse a PASCAL VOC xml file."""
134
- with PathManager.open(filename) as f:
135
- tree = ET.parse(f)
136
- objects = []
137
- for obj in tree.findall("object"):
138
- obj_struct = {}
139
- obj_struct["name"] = obj.find("name").text
140
- obj_struct["pose"] = obj.find("pose").text
141
- obj_struct["truncated"] = int(obj.find("truncated").text)
142
- obj_struct["difficult"] = int(obj.find("difficult").text)
143
- bbox = obj.find("bndbox")
144
- obj_struct["bbox"] = [
145
- int(bbox.find("xmin").text),
146
- int(bbox.find("ymin").text),
147
- int(bbox.find("xmax").text),
148
- int(bbox.find("ymax").text),
149
- ]
150
- objects.append(obj_struct)
151
-
152
- return objects
153
-
154
-
155
- def voc_ap(rec, prec, use_07_metric=False):
156
- """Compute VOC AP given precision and recall. If use_07_metric is true, uses
157
- the VOC 07 11-point method (default:False).
158
- """
159
- if use_07_metric:
160
- # 11 point metric
161
- ap = 0.0
162
- for t in np.arange(0.0, 1.1, 0.1):
163
- if np.sum(rec >= t) == 0:
164
- p = 0
165
- else:
166
- p = np.max(prec[rec >= t])
167
- ap = ap + p / 11.0
168
- else:
169
- # correct AP calculation
170
- # first append sentinel values at the end
171
- mrec = np.concatenate(([0.0], rec, [1.0]))
172
- mpre = np.concatenate(([0.0], prec, [0.0]))
173
-
174
- # compute the precision envelope
175
- for i in range(mpre.size - 1, 0, -1):
176
- mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
177
-
178
- # to calculate area under PR curve, look for points
179
- # where X axis (recall) changes value
180
- i = np.where(mrec[1:] != mrec[:-1])[0]
181
-
182
- # and sum (\Delta recall) * prec
183
- ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
184
- return ap
185
-
186
-
187
- def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False):
188
- """rec, prec, ap = voc_eval(detpath,
189
- annopath,
190
- imagesetfile,
191
- classname,
192
- [ovthresh],
193
- [use_07_metric])
194
-
195
- Top level function that does the PASCAL VOC evaluation.
196
-
197
- detpath: Path to detections
198
- detpath.format(classname) should produce the detection results file.
199
- annopath: Path to annotations
200
- annopath.format(imagename) should be the xml annotations file.
201
- imagesetfile: Text file containing the list of images, one image per line.
202
- classname: Category name (duh)
203
- [ovthresh]: Overlap threshold (default = 0.5)
204
- [use_07_metric]: Whether to use VOC07's 11 point AP computation
205
- (default False)
206
- """
207
- # assumes detections are in detpath.format(classname)
208
- # assumes annotations are in annopath.format(imagename)
209
- # assumes imagesetfile is a text file with each line an image name
210
-
211
- # first load gt
212
- # read list of images
213
- with PathManager.open(imagesetfile, "r") as f:
214
- lines = f.readlines()
215
- imagenames = [x.strip() for x in lines]
216
-
217
- # load annots
218
- recs = {}
219
- for imagename in imagenames:
220
- recs[imagename] = parse_rec(annopath.format(imagename))
221
-
222
- # extract gt objects for this class
223
- class_recs = {}
224
- npos = 0
225
- for imagename in imagenames:
226
- R = [obj for obj in recs[imagename] if obj["name"] == classname]
227
- bbox = np.array([x["bbox"] for x in R])
228
- difficult = np.array([x["difficult"] for x in R]).astype(np.bool)
229
- # difficult = np.array([False for x in R]).astype(np.bool) # treat all "difficult" as GT
230
- det = [False] * len(R)
231
- npos = npos + sum(~difficult)
232
- class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}
233
-
234
- # read dets
235
- detfile = detpath.format(classname)
236
- with open(detfile, "r") as f:
237
- lines = f.readlines()
238
-
239
- splitlines = [x.strip().split(" ") for x in lines]
240
- image_ids = [x[0] for x in splitlines]
241
- confidence = np.array([float(x[1]) for x in splitlines])
242
- BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4)
243
-
244
- # sort by confidence
245
- sorted_ind = np.argsort(-confidence)
246
- BB = BB[sorted_ind, :]
247
- image_ids = [image_ids[x] for x in sorted_ind]
248
-
249
- # go down dets and mark TPs and FPs
250
- nd = len(image_ids)
251
- tp = np.zeros(nd)
252
- fp = np.zeros(nd)
253
- for d in range(nd):
254
- R = class_recs[image_ids[d]]
255
- bb = BB[d, :].astype(float)
256
- ovmax = -np.inf
257
- BBGT = R["bbox"].astype(float)
258
-
259
- if BBGT.size > 0:
260
- # compute overlaps
261
- # intersection
262
- ixmin = np.maximum(BBGT[:, 0], bb[0])
263
- iymin = np.maximum(BBGT[:, 1], bb[1])
264
- ixmax = np.minimum(BBGT[:, 2], bb[2])
265
- iymax = np.minimum(BBGT[:, 3], bb[3])
266
- iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
267
- ih = np.maximum(iymax - iymin + 1.0, 0.0)
268
- inters = iw * ih
269
-
270
- # union
271
- uni = (
272
- (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
273
- + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
274
- - inters
275
- )
276
-
277
- overlaps = inters / uni
278
- ovmax = np.max(overlaps)
279
- jmax = np.argmax(overlaps)
280
-
281
- if ovmax > ovthresh:
282
- if not R["difficult"][jmax]:
283
- if not R["det"][jmax]:
284
- tp[d] = 1.0
285
- R["det"][jmax] = 1
286
- else:
287
- fp[d] = 1.0
288
- else:
289
- fp[d] = 1.0
290
-
291
- # compute precision recall
292
- fp = np.cumsum(fp)
293
- tp = np.cumsum(tp)
294
- rec = tp / float(npos)
295
- # avoid divide by zero in case the first detection matches a difficult
296
- # ground truth
297
- prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
298
- ap = voc_ap(rec, prec, use_07_metric)
299
-
300
- return rec, prec, ap
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Carterclear/swarm-agents/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: Swarm Agents
3
- emoji: 👁
4
- colorFrom: red
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.28.0
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- duplicated_from: swarm-agents/swarm-agents
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Celestinian/Nora-Inference/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Nora Inference
3
- emoji: 👁
4
- colorFrom: pink
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.28.3
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Chukwuka/Dog_Breed_ImageWoof/utils.py DELETED
@@ -1,120 +0,0 @@
1
- import torch
2
- import matplotlib as plt
3
- from torch.utils.data import DataLoader, TensorDataset
4
-
5
- def show_example(img,label):
6
- print('Label: ', classes[label], '('+str(label)+')')
7
- plt.imshow(img.permute(1, 2, 0))
8
-
9
-
10
- def denormalize(images, means, stds):
11
- means = torch.tensor(means).reshape(1, 3, 1, 1)
12
- stds = torch.tensor(stds).reshape(1, 3, 1, 1)
13
- return images * stds + means
14
-
15
-
16
- def accuracy(out,labels):
17
- _, preds = torch.max(out,dim=1)
18
- total = torch.sum(preds == labels).item()/len(preds)
19
- return torch.tensor(total)
20
-
21
- @torch.inference_mode()
22
- def evaluation(model,val_loader):
23
- model.eval()
24
- results = [model.validation_step(batch) for batch in val_loader]
25
- outputs = model.validation_end_epoch(results)
26
- return outputs
27
-
28
-
29
- def to_device(data, device):
30
- if isinstance(data, (tuple, list)):
31
- return [to_device(x, device) for x in data]
32
- return data.to(device, non_blocking=True)
33
-
34
- class DeviceDataLoader(DataLoader):
35
- def __init__(self, dl, device):
36
- self.dl = dl
37
- self.device = device
38
-
39
- def __iter__(self):
40
- """Yield a batch of data after moving it to device"""
41
- for x in self.dl:
42
- yield to_device(x, self.device)
43
-
44
- def __len__(self):
45
- """Number of batches"""
46
- return len(self.dl)
47
-
48
-
49
- def get_lr(optimizer):
50
- for param_group in optimizer.param_groups:
51
- return param_group['lr']
52
-
53
- def fit_one_cycle(epochs, max_lr, model, train_loader, val_loader,
54
- weight_decay=0, grad_clip=None, opt_func=torch.optim.SGD):
55
- torch.cuda.empty_cache()
56
- history = []
57
-
58
- # Set up cutom optimizer with weight decay
59
- optimizer = opt_func(model.parameters(), max_lr, weight_decay=weight_decay)
60
-
61
- # Set up one-cycle learning rate scheduler
62
- sched = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr, epochs=epochs,
63
- steps_per_epoch=len(train_loader))
64
-
65
- for epoch in range(epochs):
66
- # Training Phase
67
- model.train()
68
- train_losses = []
69
- train_acc = []
70
- lrs = []
71
- for batch in train_loader:
72
- loss, acc = model.training_step(batch)
73
- train_losses.append(loss)
74
- train_acc.append(acc)
75
- loss.backward()
76
-
77
- # Gradient clipping
78
- if grad_clip:
79
- nn.utils.clip_grad_value_(model.parameters(), grad_clip)
80
-
81
- optimizer.step()
82
- optimizer.zero_grad()
83
-
84
- # Record & update learning rate
85
- lrs.append(get_lr(optimizer))
86
- sched.step()
87
-
88
- # Validation phase
89
- result = evaluation(model, val_loader)
90
- result['train_losses'] = torch.stack(train_losses).mean().item()
91
- result['train_acc'] = torch.stack(train_acc).mean().item()
92
- result['lrs'] = lrs
93
- model.epoch_end(epoch, result)
94
- history.append(result)
95
- return history
96
-
97
-
98
- def plot_accuracies(history):
99
- plt.plot([x['val_acc'] for x in history], '-rx')
100
- plt.plot([x['train_acc'] for x in history[1:]], '-bx')
101
- plt.xlabel('epoch')
102
- plt.ylabel('accuracy')
103
- plt.legend(['Validation', 'Training'])
104
- plt.title('Accuracy vs. No. of epochs');
105
-
106
- def plot_losses(history):
107
- plt.plot([x['val_loss'] for x in history], '-rx')
108
- plt.plot([x['train_losses'] for x in history[1:]], '-bx')
109
- plt.xlabel('epoch')
110
- plt.ylabel('loss')
111
- plt.legend(['Validation', 'Training'])
112
- plt.title('Loss vs. No. of epochs');
113
-
114
-
115
- def plot_lrs(history):
116
- lrs = np.concatenate([x.get('lrs', []) for x in history])
117
- plt.plot(lrs)
118
- plt.xlabel('Batch no.')
119
- plt.ylabel('Learning rate')
120
- plt.title('Learning Rate vs. Batch no.');
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Clementapa/orang-outan-image-video-detection/app.py DELETED
@@ -1,230 +0,0 @@
1
- import os
2
- import os.path as osp
3
- from typing import List
4
-
5
- import cv2
6
- import gradio as gr
7
- import numpy as np
8
- import supervision as sv
9
- import torch
10
- from PIL import Image
11
- from supervision import Color
12
- from ultralytics import YOLO
13
-
14
- MARKDOWN = """
15
- <h1 style="text-align: center;"> WildGuardian: AI for Orangutan Ecosystem Surveillance 🦧🔍 </h1>
16
-
17
- ## About the model 👁️
18
- This is a demo for my YOLOv8 nano trained for orangutan detection.\\
19
- The model was trained using only ~1000 images of orangutan [this dataset](https://images.cv/dataset/orangutan-image-classification-dataset) and [this dataset](https://www.kaggle.com/datasets/slothkong/10-monkey-species/data) containing ~1000 images used as background images.\\
20
- Annotations were obtained using zero shot object detection method GroundingDino.\
21
-
22
- The full pipeline can be found on my github repository: https://github.com/clementapa/orangutan-image-video-detection.
23
-
24
- ## About the orangutans 🦧
25
- Because to habitat destruction, illicit poaching, and the pet trade, orangutans are in danger of going extinct. Their natural habitat has been significantly reduced by deforestation and the growth of palm oil plantations. Adult orangutans are occasionally sought for their body parts, and they are frequently captured and sold as pets. Climate change and disease are also taking a toll on their populations. Furthermore, it is concerning to note that they are limited to Borneo and Sumatra, two places on Earth. Sustainable practises and conservation initiatives are crucial to preventing the permanent extinction of these amazing animals.
26
-
27
- ## AI for good 🌍
28
- Artificial Intelligence (AI) has unquestionable power in the realm of innovation and technology. Even though artificial intelligence (AI) has frequently been used for commercial advantage, it is important to stress that AI can also be used for more noble purposes, such as protecting the environment and the planet's future. We can build a more promising and sustainable future if we reorient AI's focus from business to improving our planet.
29
- """
30
-
31
- EXAMPLES = []
32
-
33
- DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
34
-
35
- YOLO_MODEL = YOLO("train_7best.pt")
36
-
37
- BOX_ANNOTATOR = sv.BoxAnnotator(color=Color.from_hex("#FF00E4"))
38
-
39
-
40
- def annotate(
41
- image_bgr_numpy: Image.Image,
42
- detections: sv.Detections,
43
- annotator: sv.BoxAnnotator,
44
- labels: str,
45
- ) -> Image.Image:
46
- thickness = 2
47
- text_thickness = 1
48
- text_scale = 1.0
49
-
50
- height, width, _ = image_bgr_numpy.shape
51
-
52
- thickness_ratio = ((width + height) / 2) / 400
53
- text_scale_ratio = ((width + height) / 2) / 600
54
- text_thickness_ratio = ((width + height) / 2) / 400
55
-
56
- annotator.thickness = int(thickness * thickness_ratio)
57
- annotator.text_scale = float(text_scale * text_scale_ratio)
58
- annotator.text_thickness = int(text_thickness * text_thickness_ratio)
59
-
60
- annotated_bgr_image = annotator.annotate(
61
- scene=image_bgr_numpy, detections=detections, labels=labels
62
- )
63
- return Image.fromarray(annotated_bgr_image[:, :, ::-1])
64
-
65
-
66
- def inference_image(image_rgb_pil: Image.Image, confidence: float) -> List[Image.Image]:
67
- output = YOLO_MODEL(image_rgb_pil, imgsz=640, verbose=False)[0]
68
- detections = sv.Detections.from_ultralytics(output)
69
-
70
- detections = detections[detections.confidence >= confidence]
71
-
72
- labels = [
73
- f"{output.names[class_id]} {confidence:0.2f}"
74
- for _, _, confidence, class_id, _ in detections
75
- ]
76
-
77
- return annotate(
78
- image_bgr_numpy=output.orig_img.copy(),
79
- detections=detections,
80
- annotator=BOX_ANNOTATOR,
81
- labels=labels,
82
- )
83
-
84
-
85
- def process_frame(frame: np.ndarray, confidence: float) -> np.ndarray:
86
- output = YOLO_MODEL(frame, imgsz=640, verbose=False)[0]
87
-
88
- detections = sv.Detections.from_ultralytics(output)
89
-
90
- detections = detections[detections.confidence >= confidence]
91
-
92
- labels = [
93
- f"{output.names[class_id]} {confidence:0.2f}"
94
- for _, _, confidence, class_id, _ in detections
95
- ]
96
-
97
- thickness = 2
98
- text_thickness = 1
99
- text_scale = 1.0
100
-
101
- height, width, _ = output.orig_img.shape
102
-
103
- thickness_ratio = ((width + height) / 2) / 400
104
- text_scale_ratio = ((width + height) / 2) / 600
105
- text_thickness_ratio = ((width + height) / 2) / 400
106
-
107
- BOX_ANNOTATOR.thickness = int(thickness * thickness_ratio)
108
- BOX_ANNOTATOR.text_scale = float(text_scale * text_scale_ratio)
109
- BOX_ANNOTATOR.text_thickness = int(text_thickness * text_thickness_ratio)
110
-
111
- annotated_frame = BOX_ANNOTATOR.annotate(
112
- scene=output.orig_img.copy(), detections=detections, labels=labels
113
- )
114
- return annotated_frame
115
-
116
-
117
- def inference_video(path_video, confidence):
118
- path_output_video = "temp.mp4"
119
- video_capture = cv2.VideoCapture(path_video)
120
-
121
- # Check if the video file was successfully opened
122
- if not video_capture.isOpened():
123
- print("Error: Could not open video file.")
124
- exit()
125
-
126
- frame_width = int(video_capture.get(3))
127
- frame_height = int(video_capture.get(4))
128
- frame_rate = int(video_capture.get(5))
129
-
130
- fourcc = cv2.VideoWriter_fourcc(*"mp4v") # You can change the codec as needed
131
- out = cv2.VideoWriter(
132
- path_output_video, fourcc, frame_rate, (frame_width, frame_height)
133
- )
134
-
135
- while True:
136
- # Read a frame from the video
137
- ret, frame = video_capture.read()
138
-
139
- # Check if the video has ended
140
- if not ret:
141
- break
142
-
143
- # Do something with the frame (e.g., display it or process it)
144
- # For example, you can display the frame in a window
145
- annotated_frame = process_frame(frame, confidence=confidence)
146
-
147
- out.write(annotated_frame)
148
-
149
- # Release the video capture object and close any open windows
150
- video_capture.release()
151
- out.release()
152
- cv2.destroyAllWindows()
153
-
154
- return path_output_video
155
-
156
-
157
- custom_theme = gr.themes.Soft(primary_hue="green")
158
- with gr.Blocks(theme=custom_theme, css="style.css") as demo:
159
- gr.Markdown(MARKDOWN)
160
-
161
- with gr.Tab("Detect on an image 🖼️"):
162
- with gr.Row():
163
- with gr.Column():
164
- input_image = gr.Image(
165
- image_mode="RGB",
166
- sources=["upload", "clipboard"],
167
- type="pil",
168
- )
169
- example_folder = osp.join(
170
- osp.dirname(__file__), "resources/examples_images"
171
- )
172
- example_fns = [
173
- osp.join(example_folder, example)
174
- for example in os.listdir(example_folder)
175
- ]
176
- gr.Examples(
177
- examples=example_fns,
178
- inputs=[input_image],
179
- outputs=[input_image],
180
- cache_examples=False,
181
- label="Examples (click one of the images below to start)",
182
- examples_per_page=10,
183
- )
184
- confidence_image_slider = gr.Slider(
185
- label="Confidence", minimum=0.1, maximum=1.0, step=0.05, value=0.6
186
- )
187
- submit_button_image = gr.Button("Let's find orangutans 🦧 !")
188
- output_image = gr.Image(label="Results", type="pil")
189
-
190
- with gr.Tab("Detect on a video 📹"):
191
- with gr.Row():
192
- with gr.Column():
193
- input_video = gr.Video(sources=["upload"])
194
- example_folder = osp.join(
195
- osp.dirname(__file__), "resources/examples_videos"
196
- )
197
- example_fns = [
198
- osp.join(example_folder, example)
199
- for example in os.listdir(example_folder)
200
- ]
201
- gr.Examples(
202
- examples=example_fns,
203
- inputs=[input_video],
204
- outputs=[input_video],
205
- cache_examples=False,
206
- label="Examples (click one of the videos below to start)",
207
- examples_per_page=10,
208
- )
209
- confidence_video_slider = gr.Slider(
210
- label="Confidence", minimum=0.1, maximum=1.0, step=0.05, value=0.6
211
- )
212
- submit_button_video = gr.Button("Let's find orangutans 🦧 !")
213
- output_video = gr.Video(label="Results")
214
-
215
- submit_button_image.click(
216
- inference_image,
217
- inputs=[input_image, confidence_image_slider],
218
- outputs=output_image,
219
- queue=True,
220
- )
221
-
222
- submit_button_video.click(
223
- inference_video,
224
- inputs=[input_video, confidence_video_slider],
225
- outputs=output_video,
226
- queue=True,
227
- )
228
-
229
- if __name__ == "__main__":
230
- demo.queue(max_size=20, api_open=False).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CofAI/chat.b4/client/css/hljs.css DELETED
@@ -1,68 +0,0 @@
1
- .hljs {
2
- color: #e9e9f4;
3
- background: #28293629;
4
- border-radius: var(--border-radius-1);
5
- border: 1px solid var(--blur-border);
6
- font-size: 15px;
7
- word-wrap: break-word;
8
- white-space: pre-wrap;
9
- }
10
-
11
- /* style for hljs copy */
12
- .hljs-copy-wrapper {
13
- position: relative;
14
- overflow: hidden;
15
- }
16
-
17
- .hljs-copy-wrapper:hover .hljs-copy-button,
18
- .hljs-copy-button:focus {
19
- transform: translateX(0);
20
- }
21
-
22
- .hljs-copy-button {
23
- position: absolute;
24
- transform: translateX(calc(100% + 1.125em));
25
- top: 1em;
26
- right: 1em;
27
- width: 2rem;
28
- height: 2rem;
29
- text-indent: -9999px;
30
- color: #fff;
31
- border-radius: 0.25rem;
32
- border: 1px solid #ffffff22;
33
- background-color: #2d2b57;
34
- background-image: url('data:image/svg+xml;utf-8,<svg width="16" height="16" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg"><path fill-rule="evenodd" clip-rule="evenodd" d="M6 5C5.73478 5 5.48043 5.10536 5.29289 5.29289C5.10536 5.48043 5 5.73478 5 6V20C5 20.2652 5.10536 20.5196 5.29289 20.7071C5.48043 20.8946 5.73478 21 6 21H18C18.2652 21 18.5196 20.8946 18.7071 20.7071C18.8946 20.5196 19 20.2652 19 20V6C19 5.73478 18.8946 5.48043 18.7071 5.29289C18.5196 5.10536 18.2652 5 18 5H16C15.4477 5 15 4.55228 15 4C15 3.44772 15.4477 3 16 3H18C18.7956 3 19.5587 3.31607 20.1213 3.87868C20.6839 4.44129 21 5.20435 21 6V20C21 20.7957 20.6839 21.5587 20.1213 22.1213C19.5587 22.6839 18.7957 23 18 23H6C5.20435 23 4.44129 22.6839 3.87868 22.1213C3.31607 21.5587 3 20.7957 3 20V6C3 5.20435 3.31607 4.44129 3.87868 3.87868C4.44129 3.31607 5.20435 3 6 3H8C8.55228 3 9 3.44772 9 4C9 4.55228 8.55228 5 8 5H6Z" fill="white"/><path fill-rule="evenodd" clip-rule="evenodd" d="M7 3C7 1.89543 7.89543 1 9 1H15C16.1046 1 17 1.89543 17 3V5C17 6.10457 16.1046 7 15 7H9C7.89543 7 7 6.10457 7 5V3ZM15 3H9V5H15V3Z" fill="white"/></svg>');
35
- background-repeat: no-repeat;
36
- background-position: center;
37
- transition: background-color 200ms ease, transform 200ms ease-out;
38
- }
39
-
40
- .hljs-copy-button:hover {
41
- border-color: #ffffff44;
42
- }
43
-
44
- .hljs-copy-button:active {
45
- border-color: #ffffff66;
46
- }
47
-
48
- .hljs-copy-button[data-copied="true"] {
49
- text-indent: 0;
50
- width: auto;
51
- background-image: none;
52
- }
53
-
54
- .hljs-copy-alert {
55
- clip: rect(0 0 0 0);
56
- clip-path: inset(50%);
57
- height: 1px;
58
- overflow: hidden;
59
- position: absolute;
60
- white-space: nowrap;
61
- width: 1px;
62
- }
63
-
64
- @media (prefers-reduced-motion) {
65
- .hljs-copy-button {
66
- transition: none;
67
- }
68
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CofAI/chat/g4f/Provider/Providers/Bard.py DELETED
@@ -1,74 +0,0 @@
1
- import os, requests, json, browser_cookie3, re, random
2
- from ...typing import sha256, Dict, get_type_hints
3
-
4
- url = 'https://bard.google.com'
5
- model = ['Palm2']
6
- supports_stream = False
7
- needs_auth = True
8
-
9
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
10
- psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
11
- domain_name='.google.com')}['__Secure-1PSID']
12
-
13
- formatted = '\n'.join([
14
- '%s: %s' % (message['role'], message['content']) for message in messages
15
- ])
16
- prompt = f'{formatted}\nAssistant:'
17
-
18
- proxy = kwargs.get('proxy', False)
19
- if proxy == False:
20
- print('warning!, you did not give a proxy, a lot of countries are banned from Google Bard, so it may not work')
21
-
22
- snlm0e = None
23
- conversation_id = None
24
- response_id = None
25
- choice_id = None
26
-
27
- client = requests.Session()
28
- client.proxies = {
29
- 'http': f'http://{proxy}',
30
- 'https': f'http://{proxy}'} if proxy else None
31
-
32
- client.headers = {
33
- 'authority': 'bard.google.com',
34
- 'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
35
- 'origin': 'https://bard.google.com',
36
- 'referer': 'https://bard.google.com/',
37
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
38
- 'x-same-domain': '1',
39
- 'cookie': f'__Secure-1PSID={psid}'
40
- }
41
-
42
- snlm0e = re.search(r'SNlM0e\":\"(.*?)\"',
43
- client.get('https://bard.google.com/').text).group(1) if not snlm0e else snlm0e
44
-
45
- params = {
46
- 'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
47
- '_reqid': random.randint(1111, 9999),
48
- 'rt': 'c'
49
- }
50
-
51
- data = {
52
- 'at': snlm0e,
53
- 'f.req': json.dumps([None, json.dumps([[prompt], None, [conversation_id, response_id, choice_id]])])}
54
-
55
- intents = '.'.join([
56
- 'assistant',
57
- 'lamda',
58
- 'BardFrontendService'
59
- ])
60
-
61
- response = client.post(f'https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate',
62
- data=data, params=params)
63
-
64
- chat_data = json.loads(response.content.splitlines()[3])[0][2]
65
- if chat_data:
66
- json_chat_data = json.loads(chat_data)
67
-
68
- yield json_chat_data[0][0]
69
-
70
- else:
71
- yield 'error'
72
-
73
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
74
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/event.py DELETED
@@ -1,12 +0,0 @@
1
- import cv2
2
- import logging
3
- def wait_key(target = None):
4
- key = cv2.waitKey()& 0xFF
5
- if target == None:
6
- return key
7
- if type(target) == str:
8
- target = ord(target)
9
- while key != target:
10
- key = cv2.waitKey()& 0xFF
11
-
12
- logging.debug('Key Pression caught:%s'%(target))
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DDD2222/webui/README.md DELETED
@@ -1,20 +0,0 @@
1
- ---
2
- title: Stable Diffusion Web UI
3
- emoji: 🚧
4
- colorFrom: yellow
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.9
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: camenduru/webui
11
- ---
12
-
13
- ## Stable Diffusion Web UI
14
- [https://github.com/AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
15
-
16
- ## Documentation
17
- [https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki)
18
-
19
- ## Models License
20
- https://huggingface.co/spaces/CompVis/stable-diffusion-license
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/checkbox.py DELETED
@@ -1,134 +0,0 @@
1
- """gr.Checkbox() component."""
2
-
3
- from __future__ import annotations
4
-
5
- from typing import Callable, Literal
6
-
7
- from gradio_client.documentation import document, set_documentation_group
8
- from gradio_client.serializing import BooleanSerializable
9
-
10
- from gradio.components.base import FormComponent, IOComponent, _Keywords
11
- from gradio.events import Changeable, EventListenerMethod, Inputable, Selectable
12
- from gradio.interpretation import NeighborInterpretable
13
-
14
- set_documentation_group("component")
15
-
16
-
17
- @document()
18
- class Checkbox(
19
- FormComponent,
20
- Changeable,
21
- Inputable,
22
- Selectable,
23
- IOComponent,
24
- BooleanSerializable,
25
- NeighborInterpretable,
26
- ):
27
- """
28
- Creates a checkbox that can be set to `True` or `False`.
29
-
30
- Preprocessing: passes the status of the checkbox as a {bool} into the function.
31
- Postprocessing: expects a {bool} returned from the function and, if it is True, checks the checkbox.
32
- Examples-format: a {bool} representing whether the box is checked.
33
- Demos: sentence_builder, titanic_survival
34
- """
35
-
36
- def __init__(
37
- self,
38
- value: bool | Callable = False,
39
- *,
40
- label: str | None = None,
41
- info: str | None = None,
42
- every: float | None = None,
43
- show_label: bool | None = None,
44
- container: bool = True,
45
- scale: int | None = None,
46
- min_width: int = 160,
47
- interactive: bool | None = None,
48
- visible: bool = True,
49
- elem_id: str | None = None,
50
- elem_classes: list[str] | str | None = None,
51
- **kwargs,
52
- ):
53
- """
54
- Parameters:
55
- value: if True, checked by default. If callable, the function will be called whenever the app loads to set the initial value of the component.
56
- label: component name in interface.
57
- info: additional component description.
58
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
59
- show_label: if True, will display label.
60
- container: If True, will place the component in a container - providing some extra padding around the border.
61
- scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
62
- min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
63
- interactive: if True, this checkbox can be checked; if False, checking will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
64
- visible: If False, component will be hidden.
65
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
66
- elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
67
- """
68
- self.select: EventListenerMethod
69
- """
70
- Event listener for when the user selects or deselects Checkbox.
71
- Uses event data gradio.SelectData to carry `value` referring to label of checkbox, and `selected` to refer to state of checkbox.
72
- See EventData documentation on how to use this event data.
73
- """
74
- IOComponent.__init__(
75
- self,
76
- label=label,
77
- info=info,
78
- every=every,
79
- show_label=show_label,
80
- container=container,
81
- scale=scale,
82
- min_width=min_width,
83
- interactive=interactive,
84
- visible=visible,
85
- elem_id=elem_id,
86
- elem_classes=elem_classes,
87
- value=value,
88
- **kwargs,
89
- )
90
- NeighborInterpretable.__init__(self)
91
-
92
- def get_config(self):
93
- return {
94
- "value": self.value,
95
- **IOComponent.get_config(self),
96
- }
97
-
98
- @staticmethod
99
- def update(
100
- value: bool | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
101
- label: str | None = None,
102
- info: str | None = None,
103
- show_label: bool | None = None,
104
- container: bool | None = None,
105
- scale: int | None = None,
106
- min_width: int | None = None,
107
- interactive: bool | None = None,
108
- visible: bool | None = None,
109
- ):
110
- return {
111
- "label": label,
112
- "info": info,
113
- "show_label": show_label,
114
- "container": container,
115
- "scale": scale,
116
- "min_width": min_width,
117
- "interactive": interactive,
118
- "visible": visible,
119
- "value": value,
120
- "__type__": "update",
121
- }
122
-
123
- def get_interpretation_neighbors(self, x):
124
- return [not x], {}
125
-
126
- def get_interpretation_scores(self, x, neighbors, scores, **kwargs):
127
- """
128
- Returns:
129
- The first value represents the interpretation score if the input is False, and the second if the input is True.
130
- """
131
- if x:
132
- return scores[0], None
133
- else:
134
- return None, scores[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/_tensorboard_logger.py DELETED
@@ -1,157 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- """Contains a logger to push training logs to the Hub, using Tensorboard."""
15
- from pathlib import Path
16
- from typing import TYPE_CHECKING, List, Optional, Union
17
-
18
- from huggingface_hub._commit_scheduler import CommitScheduler
19
-
20
- from .utils import experimental, is_tensorboard_available
21
-
22
-
23
- if is_tensorboard_available():
24
- from tensorboardX import SummaryWriter
25
-
26
- # TODO: clarify: should we import from torch.utils.tensorboard ?
27
- else:
28
- SummaryWriter = object # Dummy class to avoid failing at import. Will raise on instance creation.
29
-
30
- if TYPE_CHECKING:
31
- from tensorboardX import SummaryWriter
32
-
33
-
34
- class HFSummaryWriter(SummaryWriter):
35
- """
36
- Wrapper around the tensorboard's `SummaryWriter` to push training logs to the Hub.
37
-
38
- Data is logged locally and then pushed to the Hub asynchronously. Pushing data to the Hub is done in a separate
39
- thread to avoid blocking the training script. In particular, if the upload fails for any reason (e.g. a connection
40
- issue), the main script will not be interrupted. Data is automatically pushed to the Hub every `commit_every`
41
- minutes (default to every 5 minutes).
42
-
43
- <Tip warning={true}>
44
-
45
- `HFSummaryWriter` is experimental. Its API is subject to change in the future without prior notice.
46
-
47
- </Tip>
48
-
49
- Args:
50
- repo_id (`str`):
51
- The id of the repo to which the logs will be pushed.
52
- logdir (`str`, *optional*):
53
- The directory where the logs will be written. If not specified, a local directory will be created by the
54
- underlying `SummaryWriter` object.
55
- commit_every (`int` or `float`, *optional*):
56
- The frequency (in minutes) at which the logs will be pushed to the Hub. Defaults to 5 minutes.
57
- repo_type (`str`, *optional*):
58
- The type of the repo to which the logs will be pushed. Defaults to "model".
59
- repo_revision (`str`, *optional*):
60
- The revision of the repo to which the logs will be pushed. Defaults to "main".
61
- repo_private (`bool`, *optional*):
62
- Whether to create a private repo or not. Defaults to False. This argument is ignored if the repo already
63
- exists.
64
- path_in_repo (`str`, *optional*):
65
- The path to the folder in the repo where the logs will be pushed. Defaults to "tensorboard/".
66
- repo_allow_patterns (`List[str]` or `str`, *optional*):
67
- A list of patterns to include in the upload. Defaults to `"*.tfevents.*"`. Check out the
68
- [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder) for more details.
69
- repo_ignore_patterns (`List[str]` or `str`, *optional*):
70
- A list of patterns to exclude in the upload. Check out the
71
- [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder) for more details.
72
- token (`str`, *optional*):
73
- Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more
74
- details
75
- kwargs:
76
- Additional keyword arguments passed to `SummaryWriter`.
77
-
78
- Examples:
79
- ```py
80
- >>> from huggingface_hub import HFSummaryWriter
81
-
82
- # Logs are automatically pushed every 15 minutes
83
- >>> logger = HFSummaryWriter(repo_id="test_hf_logger", commit_every=15)
84
- >>> logger.add_scalar("a", 1)
85
- >>> logger.add_scalar("b", 2)
86
- ...
87
-
88
- # You can also trigger a push manually
89
- >>> logger.scheduler.trigger()
90
- ```
91
-
92
- ```py
93
- >>> from huggingface_hub import HFSummaryWriter
94
-
95
- # Logs are automatically pushed every 5 minutes (default) + when exiting the context manager
96
- >>> with HFSummaryWriter(repo_id="test_hf_logger") as logger:
97
- ... logger.add_scalar("a", 1)
98
- ... logger.add_scalar("b", 2)
99
- ```
100
- """
101
-
102
- @experimental
103
- def __new__(cls, *args, **kwargs) -> "HFSummaryWriter":
104
- if not is_tensorboard_available():
105
- raise ImportError(
106
- "You must have `tensorboard` installed to use `HFSummaryWriter`. Please run `pip install --upgrade"
107
- " tensorboardX` first."
108
- )
109
- return super().__new__(cls)
110
-
111
- def __init__(
112
- self,
113
- repo_id: str,
114
- *,
115
- logdir: Optional[str] = None,
116
- commit_every: Union[int, float] = 5,
117
- repo_type: Optional[str] = None,
118
- repo_revision: Optional[str] = None,
119
- repo_private: bool = False,
120
- path_in_repo: Optional[str] = "tensorboard",
121
- repo_allow_patterns: Optional[Union[List[str], str]] = "*.tfevents.*",
122
- repo_ignore_patterns: Optional[Union[List[str], str]] = None,
123
- token: Optional[str] = None,
124
- **kwargs,
125
- ):
126
- # Initialize SummaryWriter
127
- super().__init__(logdir=logdir, **kwargs)
128
-
129
- # Check logdir has been correctly initialized and fail early otherwise. In practice, SummaryWriter takes care of it.
130
- if not isinstance(self.logdir, str):
131
- raise ValueError(f"`self.logdir` must be a string. Got '{self.logdir}' of type {type(self.logdir)}.")
132
-
133
- # Append logdir name to `path_in_repo`
134
- if path_in_repo is None or path_in_repo == "":
135
- path_in_repo = Path(self.logdir).name
136
- else:
137
- path_in_repo = path_in_repo.strip("/") + "/" + Path(self.logdir).name
138
-
139
- # Initialize scheduler
140
- self.scheduler = CommitScheduler(
141
- folder_path=self.logdir,
142
- path_in_repo=path_in_repo,
143
- repo_id=repo_id,
144
- repo_type=repo_type,
145
- revision=repo_revision,
146
- private=repo_private,
147
- token=token,
148
- allow_patterns=repo_allow_patterns,
149
- ignore_patterns=repo_ignore_patterns,
150
- every=commit_every,
151
- )
152
-
153
- def __exit__(self, exc_type, exc_val, exc_tb):
154
- """Push to hub in a non-blocking way when exiting the logger's context manager."""
155
- super().__exit__(exc_type, exc_val, exc_tb)
156
- future = self.scheduler.trigger()
157
- future.result()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/commands/lfs.py DELETED
@@ -1,202 +0,0 @@
1
- """
2
- Implementation of a custom transfer agent for the transfer type "multipart" for
3
- git-lfs.
4
-
5
- Inspired by:
6
- github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py
7
-
8
- Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md
9
-
10
-
11
- To launch debugger while developing:
12
-
13
- ``` [lfs "customtransfer.multipart"]
14
- path = /path/to/huggingface_hub/.env/bin/python args = -m debugpy --listen 5678
15
- --wait-for-client
16
- /path/to/huggingface_hub/src/huggingface_hub/commands/huggingface_cli.py
17
- lfs-multipart-upload ```"""
18
-
19
- import json
20
- import os
21
- import subprocess
22
- import sys
23
- from argparse import _SubParsersAction
24
- from typing import Dict, List, Optional
25
-
26
- from huggingface_hub.commands import BaseHuggingfaceCLICommand
27
- from huggingface_hub.lfs import LFS_MULTIPART_UPLOAD_COMMAND, SliceFileObj
28
-
29
- from ..utils import get_session, hf_raise_for_status, logging
30
-
31
-
32
- logger = logging.get_logger(__name__)
33
-
34
-
35
class LfsCommands(BaseHuggingfaceCLICommand):
    """Custom git-lfs transfer agent for the "multipart" transfer type.

    Enables users to upload large files >5GB 🔥. The spec for LFS custom
    transfer agents lives at:
    https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md

    Two commands are added to the CLI:

    1. $ huggingface-cli lfs-enable-largefiles

    Run once for each model repo containing a model file >5GB. The error
    message shown when pushing a 5GB file without prior setup documents it.

    2. $ huggingface-cli lfs-multipart-upload

    Invoked by git-lfs itself; not meant to be run by users directly.
    """

    @staticmethod
    def register_subcommand(parser: _SubParsersAction):
        # Subcommand users run once per repository to opt in to large files.
        largefiles_parser = parser.add_parser(
            "lfs-enable-largefiles",
            help="Configure your repository to enable upload of files > 5GB.",
        )
        largefiles_parser.add_argument(
            "path", type=str, help="Local path to repository you want to configure."
        )
        largefiles_parser.set_defaults(func=lambda args: LfsEnableCommand(args))

        # Subcommand invoked by git-lfs during pushes (see class docstring).
        multipart_parser = parser.add_parser(
            LFS_MULTIPART_UPLOAD_COMMAND,
            help="Command will get called by git-lfs, do not call it directly.",
        )
        multipart_parser.set_defaults(func=lambda args: LfsUploadCommand(args))
70
-
71
-
72
class LfsEnableCommand:
    """CLI command that configures a local git repository to use the
    "multipart" custom transfer agent for files larger than 5GB.

    Writes two ``git config`` entries (``lfs.customtransfer.multipart.path``
    and ``lfs.customtransfer.multipart.args``) so git-lfs invokes
    ``huggingface-cli lfs-multipart-upload`` when uploading.
    """

    def __init__(self, args):
        # `args` is the argparse Namespace; only `args.path` is used by run().
        self.args = args

    def run(self):
        """Validate the target path and write the git-lfs config entries.

        Exits the process with status 1 when `self.args.path` is not a
        directory. Raises `subprocess.CalledProcessError` if a `git config`
        invocation fails (check=True).
        """
        local_path = os.path.abspath(self.args.path)
        if not os.path.isdir(local_path):
            print("This does not look like a valid git repo.")
            # Fix: use sys.exit instead of the site-injected `exit` builtin,
            # which is not guaranteed to exist outside interactive setups.
            sys.exit(1)
        # Point git-lfs at the huggingface-cli binary for the "multipart"
        # custom transfer type. Explicit argv lists avoid any str.split()
        # surprises with the configured values.
        subprocess.run(
            ["git", "config", "lfs.customtransfer.multipart.path", "huggingface-cli"],
            check=True,
            cwd=local_path,
        )
        # ...and tell it which subcommand to invoke for that transfer type.
        subprocess.run(
            ["git", "config", "lfs.customtransfer.multipart.args", LFS_MULTIPART_UPLOAD_COMMAND],
            check=True,
            cwd=local_path,
        )
        print("Local repo set up for largefiles")
92
-
93
-
94
def write_msg(msg: Dict):
    """Serialize `msg` as one line of line-delimited JSON on stdout.

    git-lfs reads our stdout line by line, so the stream is flushed after
    every message.
    """
    # print() appends the required trailing newline; flush=True guarantees
    # git-lfs sees the message immediately.
    print(json.dumps(msg), flush=True)
99
-
100
-
101
def read_msg() -> Optional[Dict]:
    """Read one line-delimited JSON message from stdin.

    Returns:
        The parsed message dict, or `None` when a terminate message is
        received (either `type` or `event` equal to "terminate").

    Exits the process with status 1 on any message that is neither a
    "download" nor an "upload" event.
    """
    message = json.loads(sys.stdin.readline().strip())

    event = message.get("event")
    if "terminate" in (message.get("type"), event):
        # git-lfs signalled the end of the session: nothing more to read.
        return None

    if event not in ("download", "upload"):
        logger.critical("Received unexpected message")
        sys.exit(1)

    return message
114
-
115
-
116
class LfsUploadCommand:
    """Transfer-agent side of the git-lfs "multipart" upload protocol.

    Invoked by git-lfs (never by users directly): speaks line-delimited JSON
    over stdin/stdout, uploading each requested file in `chunk_size` slices
    to a list of presigned URLs, then POSTing the part etags to the
    completion URL.
    """

    def __init__(self, args):
        # `args` is the argparse Namespace; kept for CLI-command symmetry
        # (run() reads everything it needs from stdin, not from args).
        self.args = args

    def run(self):
        """Run the init handshake, then process transfer requests until a
        terminate message arrives (at which point the process exits)."""
        # Immediately after invoking a custom transfer process, git-lfs
        # sends initiation data to the process over stdin.
        # This tells the process useful information about the configuration.
        init_msg = json.loads(sys.stdin.readline().strip())
        if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"):
            write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}})
            sys.exit(1)

        # The transfer process should use the information it needs from the
        # initiation structure, and also perform any one-off setup tasks it
        # needs to do. It should then respond on stdout with a simple empty
        # confirmation structure, as follows:
        write_msg({})

        # After the initiation exchange, git-lfs will send any number of
        # transfer requests to the stdin of the transfer process, in a serial sequence.
        while True:
            msg = read_msg()
            if msg is None:
                # When all transfers have been processed, git-lfs will send
                # a terminate event to the stdin of the transfer process.
                # On receiving this message the transfer process should
                # clean up and terminate. No response is expected.
                sys.exit(0)

            # Per-transfer request fields: the object id, the local file to
            # upload, and the server-provided completion endpoint + headers.
            oid = msg["oid"]
            filepath = msg["path"]
            completion_url = msg["action"]["href"]
            header = msg["action"]["header"]
            # The server smuggles the chunk size and the presigned part URLs
            # through the action header: everything left after popping
            # "chunk_size" is treated as the ordered list of part URLs.
            chunk_size = int(header.pop("chunk_size"))
            presigned_urls: List[str] = list(header.values())

            # Send a "started" progress event to allow other workers to start.
            # Otherwise they're delayed until first "progress" event is reported,
            # i.e. after the first 5GB by default (!)
            write_msg(
                {
                    "event": "progress",
                    "oid": oid,
                    "bytesSoFar": 1,
                    "bytesSinceLast": 0,
                }
            )

            parts = []
            with open(filepath, "rb") as file:
                for i, presigned_url in enumerate(presigned_urls):
                    # SliceFileObj exposes a chunk_size window of the file
                    # starting at part i, avoiding loading the chunk in memory.
                    with SliceFileObj(
                        file,
                        seek_from=i * chunk_size,
                        read_limit=chunk_size,
                    ) as data:
                        r = get_session().put(presigned_url, data=data)
                        hf_raise_for_status(r)
                        # S3-style multipart completion needs each part's etag
                        # and 1-based part number.
                        parts.append(
                            {
                                "etag": r.headers.get("etag"),
                                "partNumber": i + 1,
                            }
                        )
                        # In order to support progress reporting while data is uploading / downloading,
                        # the transfer process should post messages to stdout
                        write_msg(
                            {
                                "event": "progress",
                                "oid": oid,
                                "bytesSoFar": (i + 1) * chunk_size,
                                "bytesSinceLast": chunk_size,
                            }
                        )
                        # Not precise but that's ok.

            # Tell the server all parts are uploaded so it can assemble them.
            r = get_session().post(
                completion_url,
                json={
                    "oid": oid,
                    "parts": parts,
                },
            )
            hf_raise_for_status(r)

            # Report completion of this transfer back to git-lfs.
            write_msg({"event": "complete", "oid": oid})