diff --git a/spaces/17TheWord/RealESRGAN/realesrgan/train.py b/spaces/17TheWord/RealESRGAN/realesrgan/train.py
deleted file mode 100644
index 8a9cec9ed80d9f362984779548dcec921a636a04..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/RealESRGAN/realesrgan/train.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# flake8: noqa
-import os.path as osp
-from basicsr.train import train_pipeline
-
-import realesrgan.archs
-import realesrgan.data
-import realesrgan.models
-
-if __name__ == '__main__':
-    root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
-    train_pipeline(root_path)
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Desarrollo del pensamiento tomo 2 resuelto pdf 27 La aventura de Shakespeare en el volumen II de Plaza Janes.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Desarrollo del pensamiento tomo 2 resuelto pdf 27 La aventura de Shakespeare en el volumen II de Plaza Janes.md
deleted file mode 100644
index 5eaae246e0581a6dafa9fd6ba8fee142f52b97f7..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Desarrollo del pensamiento tomo 2 resuelto pdf 27 La aventura de Shakespeare en el volumen II de Plaza Janes.md
+++ /dev/null
@@ -1,98 +0,0 @@
-
-
Introduction
-
Have you ever heard of desarrollo del pensamiento tomo 2 resuelto pdf 27? If you are interested in developing your thinking skills, this book is for you. It is a Spanish book that translates to "Development of Thinking Volume 2 Solved PDF 27". It is a comprehensive guide that covers various aspects of logic, reasoning, critical thinking, problem solving, creativity and innovation. It is written by a team of experts from different fields and disciplines, and it includes exercises, examples, diagrams, tables and charts to help you understand and apply the concepts.
-
Why is it important to study this book? Because in today's complex and dynamic world, you need to be able to think clearly, critically and creatively. You need to be able to analyze information, evaluate arguments, solve problems, make decisions, generate ideas and innovate solutions. These skills are essential for your personal and professional growth, as well as for your contribution to society. By studying this book, you will learn how to improve your thinking skills and become a better thinker.
The book is divided into three parts: logic and reasoning, critical thinking and problem solving, and creativity and innovation. Each part contains several chapters that explore different aspects of these topics. Here are some of the main topics covered in the book:
-
Logic and reasoning
-
This part introduces you to the basics of logic and reasoning, such as propositions, arguments, validity, soundness, fallacies, induction, deduction and abduction. You will learn how to identify and construct valid and sound arguments, how to avoid common logical errors and fallacies, how to use different types of reasoning for different purposes and contexts, and how to evaluate the strength of evidence and arguments.
-
Critical thinking and problem solving
-
This part teaches you how to apply logic and reasoning to critical thinking and problem solving. You will learn how to define problems, identify assumptions, generate hypotheses, test solutions, monitor results and revise strategies. You will also learn how to use various tools and techniques for critical thinking and problem solving, such as brainstorming, mind mapping, SWOT analysis, decision matrix, fishbone diagram and Pareto principle.
-
desarrollo del pensamiento tomo 2 solucionario pdf gratis
-descargar desarrollo del pensamiento tomo 2 resuelto pdf
-libro desarrollo del pensamiento tomo 2 resuelto pdf completo
-desarrollo del pensamiento tomo 2 resuelto pdf 2021
-desarrollo del pensamiento tomo 2 resuelto pdf online
-desarrollo del pensamiento tomo 2 resuelto pdf descargar gratis
-desarrollo del pensamiento tomo 2 resuelto pdf capitulo 27
-desarrollo del pensamiento tomo 2 resuelto pdf ejercicios
-desarrollo del pensamiento tomo 2 resuelto pdf pagina 27
-desarrollo del pensamiento tomo 2 resuelto pdf gratis
-desarrollo del pensamiento tomo 2 resuelto pdf sway
-desarrollo del pensamiento tomo 2 resuelto pdf soundcloud
-desarrollo del pensamiento tomo 2 resuelto pdf libro
-desarrollo del pensamiento tomo 2 resuelto pdf download
-desarrollo del pensamiento tomo 2 resuelto pdf gratis online
-desarrollo del pensamiento tomo 2 resuelto pdf soluciones
-desarrollo del pensamiento tomo 2 resuelto pdf completo
-desarrollo del pensamiento tomo 2 resuelto pdf gratis descargar
-desarrollo del pensamiento tomo 2 resuelto pdf capitulo 27 solucionario
-desarrollo del pensamiento tomo 2 resuelto pdf ejercicios resueltos
-desarrollo del pensamiento tomo 2 resuelto pdf pagina 27 soluciones
-desarrollo del pensamiento tomo 2 resuelto pdf gratis sway
-desarrollo del pensamiento tomo 2 resuelto pdf gratis soundcloud
-desarrollo del pensamiento tomo 2 resuelto pdf libro gratis
-desarrollo del pensamiento tomo 2 resuelto pdf download gratis
-desarrollo del pensamiento tomo 2 solucionario pdf online
-descargar desarrollo del pensamiento tomo 2 solucionario pdf gratis
-libro desarrollo del pensamiento tomo 2 solucionario pdf completo
-desarrollo del pensamiento tomo 2 solucionario pdf 2021
-desarrollo del pensamiento tomo 2 solucionario pdf descargar gratis
-desarrollo del pensamiento tomo 2 solucionario pdf capitulo 27
-desarrollo del pensamiento tomo 2 solucionario pdf ejercicios
-desarrollo del pensamiento tomo 2 solucionario pdf pagina 27
-desarrollo del pensamiento tomo 2 solucionario pdf sway
-desarrollo del pensamiento tomo 2 solucionario pdf soundcloud
-desarrollo del pensamiento tomo 2 solucionario pdf libro
-desarrollo del pensamiento tomo 2 solucionario pdf download
-descargar desarrollo del pensamiento tomo 2 resuelto capitulo 27 pdf gratis
-libro desarrollo del pensamiento tomo 2 resuelto capitulo 27 pdf completo
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 pdf online
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 pdf descargar gratis
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 ejercicios
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 pagina
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 sway
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 soundcloud
-desarrollo del pensamiento tomo 2 resuelto capitulo
-
Creativity and innovation
-
This part shows you how to use logic and reasoning to enhance your creativity and innovation. You will learn how to develop your creative potential, overcome mental blocks, stimulate your imagination, generate original ideas and implement innovative solutions. You will also learn how to use various methods and models for creativity and innovation, such as lateral thinking, divergent thinking, convergent thinking, TRIZ method, SCAMPER technique and design thinking.
-
How can you access the book online?
-
If you want to read desarrollo del pensamiento tomo 2 resuelto pdf 27 online, you have several options. Here are some of them:
-
Download it from Sway
-
Sway is a Microsoft service that allows you to create and share interactive presentations online. You can find desarrollo del pensamiento tomo 2 resuelto pdf 27 on Sway by following this link: https://sway.office.com/skrWSVcG4BefKxCb. You can download the PDF file from there by clicking on the download icon at the top right corner of the screen.
-
Read it on Scribd
-
Scribd is a digital library that offers unlimited access to books, audiobooks, magazines and documents online. You can find desarrollo del pensamiento tomo 2 resuelto pdf 27 on Scribd by following this link: https://www.scribd.com/document/511741583/Desarrollo-Del-Pensamiento-Tomo-2-Resuelto-Pdf-27. You can read the book online or download it as a PDF file by clicking on the download icon at the top right corner of the screen.
-
Buy it from Amazon
-
Amazon is an online marketplace that sells books, electronics, clothing and other products. You can buy desarrollo del pensamiento tomo 2 resuelto pdf 27 on Amazon by following this link: https://www.amazon.com/Desarrollo-Del-Pensamiento-Tomo-Resuelto/dp/B08ZJWZQ8Q. You can order the paperback version or the Kindle version of the book by clicking on the add to cart or buy now buttons.
-
How can you use the book to improve your skills?
-
Reading desarrollo del pensamiento tomo 2 resuelto pdf 27 online is not enough if you want to improve your skills. You need to practice what you learn by doing the exercises and examples in the book. You also need to apply what you learn by using the concepts in real-life situations. Here are some tips on how to use the book effectively:
-
Follow the exercises and examples
-
The book contains many exercises and examples that help you test your understanding and reinforce your learning. You should follow them carefully and try to solve them on your own before checking the answers. You should also compare your answers with those provided in the book and analyze why they are correct or incorrect. This will help you identify your strengths and weaknesses and improve your skills.
-
Apply the concepts to real-life situations
-
The book also contains many case studies and scenarios that illustrate how the concepts can be applied in real-life situations. You should read them attentively and try to relate them to your own experiences or interests. You should also think of other situations where you can use the concepts in your personal or professional life. This will help you transfer your learning from theory to practice and enhance your skills.
-
Join a study group or a forum
-
The book can be more enjoyable and effective if you study it with others who share your interest or goal. You can join a study group or a forum where you can discuss the topics in the book with other learners or experts. You can ask questions, share insights, exchange feedback, or challenge each other with new problems or ideas. This will help you expand your perspective and deepen your understanding.
-
Conclusion
-
Summary of the main points
-
In conclusion, desarrollo del pensamiento tomo 2 resuelto pdf 27 is a valuable resource for anyone who wants to develop their thinking skills. It covers various aspects of logic, reasoning, critical thinking, problem solving, creativity and innovation. It provides exercises, examples, diagrams, tables and charts to help you understand and apply the concepts. It also offers several options for accessing the book online, such as downloading it from Sway, reading it on Scribd or buying it from Amazon. Finally, it gives some tips on how to use the book effectively, such as following the exercises and examples, applying the concepts to real-life situations or joining a study group or a forum.
-
Recommendations for further reading
-
If you want to learn more about the topics covered in the book, you can check out these resources:
-
-
Logical and Critical Thinking: This is a free online course offered by the University of Auckland that teaches you how to identify, analyze and evaluate arguments using logic and critical thinking.
-
Problem Solving using Computational Thinking: This is a free online course offered by the University of Michigan that teaches you how to use computational thinking to solve complex problems in various domains.
-
Creativity, Innovation and Change: This is a paid online course offered by Udemy that teaches you how to unleash your creativity, generate innovative ideas and implement change in your personal or professional life.
-
-
FAQs
-
Here are some frequently asked questions about desarrollo del pensamiento tomo 2 resuelto pdf 27:
-
-
What is the purpose of the book? The purpose of the book is to help you develop your thinking skills in various aspects, such as logic, reasoning, critical thinking, problem solving, creativity and innovation.
-
Who is the author of the book? The book is written by a team of experts from different fields and disciplines, such as mathematics, philosophy, psychology, engineering and education.
-
How long is the book? The book is about 400 pages long. It contains 27 chapters divided into three parts: logic and reasoning, critical thinking and problem solving, and creativity and innovation.
-
How can I get a copy of the book? You can get a copy of the book online by downloading it from Sway, reading it on Scribd or buying it from Amazon. You can also find it in some libraries or bookstores.
-
How can I use the book effectively? You can use the book effectively by following the exercises and examples in the book, applying the concepts to real-life situations and joining a study group or a forum.
-
- 0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download V-ray Sketchup 2016 64 Bit Full Crack !EXCLUSIVE!.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download V-ray Sketchup 2016 64 Bit Full Crack !EXCLUSIVE!.md
deleted file mode 100644
index 66a0a5e98254d1df73466ae226c0055d2a13e78f..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download V-ray Sketchup 2016 64 Bit Full Crack !EXCLUSIVE!.md
+++ /dev/null
@@ -1,38 +0,0 @@
-
-
How to Download V-Ray SketchUp 2016 64 Bit Full Crack
-
V-Ray is a powerful rendering engine that can enhance the quality and realism of your 3D models and scenes. It is compatible with SketchUp, a popular 3D modeling and design software that can create stunning architectural and interior designs. If you want to download V-Ray SketchUp 2016 64 bit full crack for free, you are in the right place. In this article, we will show you how to download and install V-Ray SketchUp 2016 64 bit full crack on your PC.
V-Ray SketchUp 2016 64 bit full crack is a cracked version of V-Ray SketchUp 2016 64 bit, which is a plugin that adds rendering capabilities to SketchUp. With V-Ray SketchUp 2016 64 bit full crack, you can render photorealistic images and animations with advanced lighting, materials, and camera settings. You can also use V-Ray SketchUp 2016 64 bit full crack to create realistic effects such as depth of field, motion blur, fog, caustics, and more.
-
V-Ray SketchUp 2016 64 bit full crack has many features and benefits, such as:
-
-
It has a friendly user interface and supports 4K monitors.
-
It can render any type of natural or artificial lighting with a wide range of built-in light types.
-
It can render photorealistic rooms and interiors with powerful and fast global illumination.
-
It can emit light from any scene object to simulate real-world custom light shapes.
-
It can simulate natural looking skies with realistic atmospheric depth.
-
It can handle complex geometry and large scenes with ease.
-
It can integrate with other SketchUp features and extensions.
-
It can export VR-ready content for virtual reality devices.
-
-
How to Download V-Ray SketchUp 2016 64 Bit Full Crack?
-
To download V-Ray SketchUp 2016 64 bit full crack, you need to follow these steps:
Extract the downloaded file with WinRAR or any other file compression software.
-
Run the installer file "SketchUpPro-en-x64.exe" and follow the instructions to install SketchUp Pro 2016 on your PC.
-
After the installation is complete, unzip the file "SketchUp Pro 2016 x64-patch.zip". Inside it, you will find a patcher file named "su2015-64-patch.exe".
-
Copy and paste the patcher file to the folder where you installed SketchUp (by default, it is C:\\Program Files\\SketchUp\\SketchUp 2016).
-
Run the patcher file as administrator and click on the patch button. You will see a message saying "Can not find the file. Search the file?". Click on "Yes".
-
A new window will open. Browse to the folder "LayOut" (by default, it is C:\\Program Files\\SketchUp\\SketchUp 2016\\LayOut) and select the file "LayOut.exe". Click on "Open".
-
The patcher will patch the file and show a message saying "The file has been patched!". Click on "OK".
-
Repeat steps 6 to 8 for the files "Style Builder.exe" and "SketchUp.exe" in their respective folders.
-
You have now successfully installed V-Ray SketchUp 2016 64 bit full crack on your PC.
-
-
How to Use V-Ray SketchUp 2016 64 Bit Full Crack?
-
To use V-Ray SketchUp 2016 64 bit full crack, you need to follow these steps:
-
-
-
Launch Sketch ddb901b051
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Celemony.Melodyne.Editor.v2.1.1.15-R2R .rar !LINK!.md b/spaces/1gistliPinn/ChatGPT4/Examples/Celemony.Melodyne.Editor.v2.1.1.15-R2R .rar !LINK!.md
deleted file mode 100644
index 8eae6367ec34b109ece7ee4bc8d65959e587702c..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Celemony.Melodyne.Editor.v2.1.1.15-R2R .rar !LINK!.md
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
This text contains a list of file names and links related to Celemony Melodyne Editor, a software for editing audio files. The file names have different extensions, such as .rar, .zip, .html, and indicate the version number (v2.1.1.15), the release group (R2R), and the presence of a crack (a program that bypasses the software's copy protection). The file size is 84.8 MB for most of the files. The links at the end of the text point to websites that offer downloads of other files, such as a summary of biology for high school students in PDF format, a physics textbook for class 9 in PDF format, and a Hindi comedy movie in 720p resolution.
Celemony Melodyne Editor is a software that allows users to manipulate audio files in various ways, such as changing the pitch, tempo, timing, and tone of individual notes or entire tracks. It can also correct intonation and timing errors, create harmonies and melodies, and transcribe audio into musical notation. Celemony Melodyne Editor is compatible with Windows and Mac operating systems, and can be used as a standalone application or as a plug-in for other audio editing software.
-
-
The files listed in the text are compressed archives that contain the installation files and the crack for Celemony Melodyne Editor. A crack is a program that modifies the software's code to bypass its copy protection and allow users to use it without a license or activation key. However, using a crack is illegal and risky, as it may contain malware or viruses that can harm the user's computer or data. Moreover, using a cracked software may result in poor performance, errors, or compatibility issues with other software or hardware.
-
-
The links at the end of the text are unrelated to Celemony Melodyne Editor and seem to be spam or phishing attempts. They direct the user to websites that offer downloads of other files that may be of interest to some users, such as educational materials or entertainment content. However, these websites may also contain malware or viruses that can harm the user's computer or data. Furthermore, downloading these files may infringe the intellectual property rights of the original authors or creators. Therefore, it is advisable to avoid clicking on these links and to delete the text.
- d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Cubase 6 Full Version Free Download Torrent [REPACK].md b/spaces/1gistliPinn/ChatGPT4/Examples/Cubase 6 Full Version Free Download Torrent [REPACK].md
deleted file mode 100644
index 962254a20601b7e9dd3a934ff5f93abe138ac33b..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Cubase 6 Full Version Free Download Torrent [REPACK].md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-Current sounds can only be downloaded using the Steinberg Download Assistant. ... 1, MAC WINDOWS, Groove Agent ONE/SE/4 VST Toolkit, 800MB.... 3, MAC WINDOWS, Groove Agent SE/5 VST Toolkit, 2GB. ...
-4, MAC WINDOWS, Groove Agent SE/5 VST Toolkit, 2 GB
-Jul 12 2019 Download.
-Groove Agent SE 5.0 VST, AAX, AU WIN.OSX x86 x64 Release Year/Date: 05.2019 Version: 5.0 Developer: Steinberg Website
-Feb 7
-2014 · Groove Agent SE 5.0.
-Description: Steinberg Groove Agent puts at your disposal a set of tools and ... VST, AAX, AU
-Mar 9 2015 Download torrent for free.
-distribution statistics. ... 8a78ff9644
-
-
-
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing Lite MOD APK OBB Everything You Need to Know Before You Download.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing Lite MOD APK OBB Everything You Need to Know Before You Download.md
deleted file mode 100644
index 1a58c33f2ea2691aeeb4ddabc2f3ead811761c26..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing Lite MOD APK OBB Everything You Need to Know Before You Download.md
+++ /dev/null
@@ -1,90 +0,0 @@
-
-
CarX Drift Racing Lite Mod APK OBB: A Guide for Drift Racing Fans
-
Do you love drifting and racing games? Do you want to experience the thrill of driving realistic cars on challenging tracks? If yes, then you should try CarX Drift Racing Lite, a popular game that lets you enjoy the best of both worlds. And if you want to make the game even more fun and exciting, you should download CarX Drift Racing Lite Mod APK OBB, a modified version that gives you unlimited money, coins, cars, tracks, and more. In this article, we will tell you everything you need to know about CarX Drift Racing Lite and its mod apk obb version.
-
What is CarX Drift Racing Lite?
-
CarX Drift Racing Lite is a racing game that focuses on drifting, a driving technique where the driver intentionally oversteers the car to make it slide sideways. The game is developed by CarX Technologies, a company that specializes in creating realistic car physics and graphics for games. CarX Drift Racing Lite is a lite version of CarX Drift Racing, which means it has fewer cars, tracks, and features than the original game. However, it still offers a lot of fun and entertainment for drift racing fans.
One of the main attractions of CarX Drift Racing Lite is its realistic physics and graphics. The game uses a sophisticated car physics engine that simulates the behavior of real cars on different surfaces and conditions. The game also has stunning graphics that create an immersive environment for the players. You can see the smoke, dust, sparks, and tire marks as you drift your car on the track. You can also feel the vibration and sound effects as you accelerate, brake, and steer your car.
-
Customizable cars and tracks
-
Another feature of CarX Drift Racing Lite is its customizable cars and tracks. The game allows you to choose from a variety of cars, each with its own characteristics and performance. You can also customize your car's appearance, color, wheels, engine, suspension, and more. You can also choose from different tracks, each with its own layout, difficulty, and scenery. You can also adjust the weather, time of day, and camera angle to suit your preference.
-
Online and offline modes
-
A third feature of CarX Drift Racing Lite is its online and offline modes. The game lets you play either online or offline, depending on your internet connection and mood. If you play online, you can compete with other players from around the world in various modes such as time attack, ghost mode, or multiplayer mode. You can also chat with other players and share your replays and screenshots. If you play offline, you can practice your skills in single-player mode or challenge yourself in career mode.
-
Why download CarX Drift Racing Lite Mod APK OBB?
-
If you are already enjoying CarX Drift Racing Lite, you might wonder why you should download CarX Drift Racing Lite Mod APK OBB. Well, the answer is simple: because it makes the game even better. CarX Drift Racing Lite Mod APK OBB is a modified version of the game that gives you access to unlimited money, coins, cars, tracks, and more.
No ads and no root required
-
With CarX Drift Racing Lite Mod APK OBB, you don't have to deal with annoying ads and pop-ups that interrupt your gameplay. You can enjoy the game without any distractions or interruptions. You also don't need to root your device to install the mod apk obb files. You can simply follow the instructions below and enjoy the game safely and smoothly.
-
How to download and install CarX Drift Racing Lite Mod APK OBB?
-
Step 1: Download the mod apk and obb files from a trusted source
-
The first step is to download the mod apk and obb files from a trusted source. You can use the link provided at the end of this article to download the files. Make sure you have enough storage space on your device before downloading the files.
-
Step 2: Enable unknown sources on your device settings
-
The second step is to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on. You may also need to disable any antivirus or security apps that may interfere with the installation process.
-
carx drift racing lite mod apk obb download
-carx drift racing lite mod apk obb unlimited money
-carx drift racing lite mod apk obb latest version
-carx drift racing lite mod apk obb android 1
-carx drift racing lite mod apk obb revdl
-carx drift racing lite mod apk obb rexdl
-carx drift racing lite mod apk obb offline
-carx drift racing lite mod apk obb hack
-carx drift racing lite mod apk obb free
-carx drift racing lite mod apk obb data
-carx drift racing lite mod apk obb file
-carx drift racing lite mod apk obb full
-carx drift racing lite mod apk obb mega
-carx drift racing lite mod apk obb mediafire
-carx drift racing lite mod apk obb google drive
-carx drift racing lite mod apk obb 2023
-carx drift racing lite mod apk obb update
-carx drift racing lite mod apk obb new
-carx drift racing lite mod apk obb best
-carx drift racing lite mod apk obb premium
-carx drift racing lite mod apk obb pro
-carx drift racing lite mod apk obb vip
-carx drift racing lite mod apk obb unlocked
-carx drift racing lite mod apk obb all cars
-carx drift racing lite mod apk obb no ads
-carx drift racing lite mod apk obb no root
-carx drift racing lite mod apk obb no verification
-carx drift racing lite mod apk obb no survey
-carx drift racing lite mod apk obb easy install
-carx drift racing lite mod apk obb direct link
-carx drift racing lite mod apk obb high quality
-carx drift racing lite mod apk obb realistic graphics
-carx drift racing lite mod apk obb smooth gameplay
-carx drift racing lite mod apk obb awesome features
-carx drift racing lite mod apk obb fun modes
-carx drift racing lite mod apk obb online multiplayer
-carx drift racing lite mod apk obb custom cars
-carx drift racing lite mod apk obb tuning options
-carx drift racing lite mod apk obb drifting physics
-carx drift racing lite mod apk obb sound effects
-carx drift racing lite mod apk obb music tracks
-carx drift racing lite mod apk obb leaderboards
-carx drift racing lite mod apk obb achievements
-carx drift racing lite mod apk obb rewards
-carx drift racing lite mod apk obb cheats
-carx drift racing lite mod apk obb tips tricks
-carx drift racing lite mod apk obb guide tutorial
-carx drift racing lite mod apk obb review rating
-carx drift racing lite mod apk obb gameplay video
-
Step 3: Install the mod apk file and extract the obb file to the Android/obb folder
-
The third step is to install the mod apk file and extract the obb file to the Android/obb folder. To do this, locate the downloaded files on your device, then tap on the mod apk file and follow the instructions to install it. Then, use a file manager app to extract the obb file to the Android/obb folder. If you don't have a file manager app, you can download one from the Google Play Store. Make sure you create a folder named com.CarXTech.CarXDriftRacingLite inside the Android/obb folder and place the extracted obb file there.
-
Conclusion
-
CarX Drift Racing Lite is a great game for drift racing fans who want to experience realistic physics and graphics, customizable cars and tracks, and online and offline modes. However, if you want to make the game even more enjoyable and exciting, you should download CarX Drift Racing Lite Mod APK OBB, which gives you unlimited money, coins, cars, tracks, and more. You can download CarX Drift Racing Lite Mod APK OBB from the link below and follow the steps above to install it on your device. Have fun drifting and racing!
-
FAQs
-
Here are some of the frequently asked questions about CarX Drift Racing Lite Mod APK OBB:
-
-
Is CarX Drift Racing Lite Mod APK OBB safe to use?
-
Yes, CarX Drift Racing Lite Mod APK OBB is safe to use as long as you download it from a trusted source and follow the installation instructions carefully. However, you should always be careful when downloading and installing any mod apk obb files from unknown sources as they may contain viruses or malware that can harm your device.
-
Is CarX Drift Racing Lite Mod APK OBB compatible with my device?
-
CarX Drift Racing Lite Mod APK OBB is compatible with most Android devices that have Android 4.1 or higher versions. However, some devices may not support some of the features or functions of the game due to hardware or software limitations.
-
Can I play CarX Drift Racing Lite Mod APK OBB with my friends?
-
Yes, you can play CarX Drift Racing Lite Mod APK OBB with your friends online in multiplayer mode. You can also chat with them and share your replays and screenshots.
-
Can I update CarX Drift Racing Lite Mod APK OBB?
-
No, you cannot update CarX Drift Racing Lite Mod APK OBB as it is a modified version of the game that may not be compatible with the latest updates from the official developers. If you want to update the game, you will have to uninstall the mod apk obb files and install the original version from the Google Play Store.
-
Where can I download CarX Drift Racing Lite Mod APK OBB?
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download ETS2 Mods for Euro Truck Simulator 2 and Enhance Your Gaming Experience.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download ETS2 Mods for Euro Truck Simulator 2 and Enhance Your Gaming Experience.md
deleted file mode 100644
index 049d638a7d7f013ef0ef513349501dfbd29a3b20..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download ETS2 Mods for Euro Truck Simulator 2 and Enhance Your Gaming Experience.md
+++ /dev/null
@@ -1,110 +0,0 @@
-
-
Euro Truck Simulator 2 For Mobile - Everything You Need to Know
-
Do you love driving trucks and exploring new places? Do you want to experience the thrill of being a truck driver from the comfort of your home? If you answered yes to any of these questions, then you should definitely check out Euro Truck Simulator 2, one of the most popular and realistic truck driving simulator games ever made. And the best part is, you can now play it on your mobile device thanks to ets2.mobi, a website that offers ETS2 for Android and iOS. In this article, we will tell you everything you need to know about Euro Truck Simulator 2 for mobile, including what it is, how to download and install it, how to play it, and why you should try it today.
Euro Truck Simulator 2, or ETS2 for short, is a game that simulates the life of a truck driver in Europe. It was developed and published by SCS Software, a Czech company that specializes in creating simulation games. ETS2 was released in 2012 for Windows, Linux, and Mac OS, and has since received many updates and expansions that added new features, content, and improvements. ETS2 has three main aspects that make it so appealing and realistic: a truck driving simulator, a huge map of Europe, and a variety of trucks and customization options.
-
A realistic truck driving simulator game
-
ETS2 is not just a game where you drive a truck from point A to point B. It is a game where you have to follow the rules of the road, deal with traffic, weather, fuel consumption, fatigue, cargo delivery, fines, repairs, and more. You have to plan your routes carefully, choose the best contracts, manage your finances, hire drivers, buy garages, and grow your own trucking company. You also have to take care of your truck, which can get damaged or break down if you drive recklessly or neglect maintenance. You can also customize your truck with different parts, accessories, paint jobs, decals, and more.
-
A huge map of Europe to explore
-
ETS2 features a massive map of Europe that covers over 70 cities in 13 countries. You can drive across different landscapes, such as mountains, forests, fields, deserts, coasts, and urban areas. You can also visit famous landmarks, such as the Eiffel Tower in Paris, the Brandenburg Gate in Berlin, the Colosseum in Rome, and more. The map is constantly updated with new regions and roads that add more diversity and realism to the game. You can also download mods that add even more countries and locations to the game.
-
A variety of trucks and customization options
-
ETS2 offers a wide range of trucks from different manufacturers, such as Mercedes-Benz, Volvo, Scania, MAN, DAF, Renault, Iveco, and more. Each truck has its own specifications, performance, handling, sound effects, and interior design. You can also customize your truck with different parts, accessories, paint jobs, decals, and more. You can also download mods that add new trucks or modify existing ones.
-
What is ets2.mobi?
-
ets2.mobi is a website that offers Euro Truck Simulator 2 for mobile devices. It allows you to download and install ETS2 on your Android or iOS phone or tablet without any hassle. You don't need to root or jailbreak your device or use any complicated software or hardware. You just need to follow a few simple steps and you will be able to enjoy ETS2 on your mobile device in no time.
-
How to download and install ETS2 on your phone or tablet
-
Downloading and installing ETS2 on your mobile device is very easy and fast. Here are the steps you need to follow:
-
ets2 mobile apk download
-ets2 android gameplay
-ets2 ios app
-ets2 licensed trucks
-ets2 customization options
-ets2 advanced driving physics
-ets2 truck driving simulator
-ets2 official website
-ets2 modhub
-ets2 mods download
-ets2 best mods
-ets2 realistic mods
-ets2 map mods
-ets2 traffic mods
-ets2 sound mods
-ets2 graphics mods
-ets2 tuning mods
-ets2 trailer mods
-ets2 skin mods
-ets2 truck mods
-ets2 multiplayer mod
-ets2 online mod
-ets2 promods
-ets2 rusmap
-ets2 balkans map
-ets2 scandinavia dlc
-ets2 going east dlc
-ets2 vive la france dlc
-ets2 italia dlc
-ets2 beyond the baltic sea dlc
-ets2 road to the black sea dlc
-ets2 iberia dlc
-ets2 heart of russia dlc
-ets2 cabin accessories dlc
-ets2 wheel tuning pack dlc
-ets2 mighty griffin tuning pack dlc
-ets2 heavy cargo pack dlc
-ets2 special transport dlc
-ets2 high power cargo pack dlc
-ets2 krone trailer pack dlc
-ets2 schwarzmuller trailer pack dlc
-ets2 michelin fan pack dlc
-ets2 goodyear tyres pack dlc
-ets2 actros tuning pack dlc
-ets2 fh tuning pack dlc
-
-
Go to ets2.mobi on your mobile browser and click on the download button.
-
Choose your device type (Android or iOS) and wait for the download to finish.
-
Open the downloaded file and follow the instructions to install ETS2 on your device.
-
Launch the game and enjoy playing ETS2 on your mobile device.
-
-
Note: You may need to enable unknown sources or trust the app in your device settings before installing ETS2. This is a normal procedure for installing apps from outside the official app stores and it does not harm your device or data in any way.
-
The features and benefits of playing ETS2 on mobile
-
Playing ETS2 on your mobile device has many advantages over playing it on a PC or console. Here are some of them:
-
-
You can play ETS2 anytime and anywhere you want, as long as you have your mobile device with you.
-
You can save space and money, as you don't need to buy or maintain a PC or console to play ETS2.
-
You can enjoy the same graphics, gameplay, and content as the PC version of ETS2, as ets2.mobi uses a special technology that optimizes the game for mobile devices without compromising quality or performance.
-
You can connect with other players online and join multiplayer sessions, chat with them, share your progress, and more.
-
You can access exclusive features and bonuses that are only available for mobile users, such as special trucks, skins, events, rewards, and more.
-
-
How to play ETS2 on mobile?
-
Playing ETS2 on your mobile device is very similar to playing it on a PC or console. You just need to learn the controls and interface of the game and you will be ready to hit the road. Here are some tips and tricks to help you get started:
-
The controls and interface of ETS2 on mobile
-
The controls and interface of ETS2 on mobile are designed to be intuitive and user-friendly. You can choose between different control modes, such as tilt, touch, or steering wheel. You can also customize the buttons, sensitivity, and layout of the controls according to your preference. You can also use voice commands to control some functions of the game, such as navigation, radio, or horn.
-
The interface of ETS2 on mobile consists of various elements that display important information and options for the game. You can see your speedometer, fuel gauge, damage indicator, map, GPS, mirrors, dashboard, and more. You can also access the menu, settings, profile, achievements, statistics, leaderboards, and more. You can also interact with various objects and characters in the game, such as toll booths, gas stations, rest areas, traffic lights, pedestrians, police officers, and more.
-
The game modes and challenges of ETS2 on mobile
-
ETS2 on mobile offers various game modes and challenges that suit different play styles and preferences. You can choose between different difficulty levels, such as easy, normal, or hard, depending on how realistic and challenging you want the game to be. You can also choose between different game modes, such as:
- Career mode: This is the main mode of the game, where you start as a rookie driver and work your way up to become a successful trucker. You have to complete various contracts, deliver cargo, earn money, buy and upgrade trucks, hire drivers, and expand your business. You can also customize your profile, choose your preferred truck brand, and join a company of your choice.
- Free mode: This is a mode where you can drive freely across the map without any time or money constraints. You can explore different regions, visit landmarks, test different trucks, and enjoy the scenery. You can also switch between day and night, change the weather, and adjust the traffic density.
- Challenge mode: This is a mode where you can test your skills and compete with other players in various challenges, such as parking, racing, cargo delivery, fuel economy, and more. You can also create your own challenges and share them with other players online.
The tips and tricks to enjoy ETS2 on mobile
-
ETS2 on mobile is a fun and immersive game that can keep you entertained for hours. However, it can also be challenging and frustrating at times, especially if you are new to the game or not familiar with the controls. Here are some tips and tricks that can help you enjoy ETS2 on mobile more:
- Follow the tutorial: The game offers a tutorial that teaches you the basics of the game, such as how to drive, park, deliver cargo, use the GPS, and more. It is highly recommended that you follow the tutorial before you start playing the game, as it will help you avoid many mistakes and problems later on.
- Adjust the settings: The game allows you to adjust various settings that can affect your gameplay experience, such as graphics quality, sound volume, control mode, sensitivity, language, units, and more. You should experiment with different settings and find the ones that suit your device and preference best.
- Save frequently: The game has an autosave feature that saves your progress every time you complete a contract or enter a new city. However, it is also advisable that you manually save your game often, especially before you start a long or difficult journey. This way, you can avoid losing your progress or money if something goes wrong or if the game crashes.
- Drive carefully: The game simulates realistic driving physics and mechanics, which means that you have to drive carefully and follow the rules of the road. You have to pay attention to your speed limit, traffic signs, signals, lanes, pedestrians, and other vehicles. You also have to watch out for your fuel level, damage, fatigue, and cargo weight. If you drive recklessly or break the law, you can get fined, lose your cargo, damage your truck, or even cause accidents. You can also use the cruise control, speed limiter, and brake assist features to help you drive more smoothly and safely.
- Use the GPS: The game provides you with a GPS system that shows you the best route to your destination, the distance and time remaining, the speed limit, and the traffic conditions. You can also use the map view to see the whole map of Europe and plan your routes ahead. You can also set waypoints, zoom in and out, and switch between 2D and 3D modes. The GPS is a very useful tool that can help you navigate the roads and avoid getting lost or stuck.
- Enjoy the scenery: The game features stunning graphics and realistic sound effects that create an immersive atmosphere for the game. You can see the changing landscapes, weather, seasons, day and night cycles, and more. You can also listen to the radio, which offers various stations that play different genres of music and news. You can also use the photo mode to take pictures of your truck or the scenery and share them with other players online.
Conclusion
-
Euro Truck Simulator 2 is a game that lets you experience the life of a truck driver in Europe. You can drive across different countries, deliver cargo, earn money, buy and upgrade trucks, hire drivers, and grow your own trucking company. You can also customize your truck with different parts, accessories, paint jobs, decals, and more. You can also download mods that add new trucks or modify existing ones.
-
ETS2 is now available for mobile devices thanks to ets2.mobi, a website that offers ETS2 for Android and iOS. You can download and install ETS2 on your phone or tablet without any hassle. You can enjoy the same graphics, gameplay, and content as the PC version of ETS2, as well as exclusive features and bonuses for mobile users. You can also connect with other players online and join multiplayer sessions, chat with them, share your progress, and more.
-
ETS2 is a fun and immersive game that can keep you entertained for hours. However, it can also be challenging and frustrating at times, especially if you are new to the game or not familiar with the controls. That's why we have provided you with some tips and tricks that can help you enjoy ETS2 on mobile more.
-
If you love driving trucks and exploring new places, then you should definitely try ETS2 on mobile today. It is a game that will make you feel like a real truck driver in Europe.
-
FAQs
-
Here are some frequently asked questions about ETS2 on mobile:
-
-
Is ETS2 on mobile free?
-
Yes, ETS2 on mobile is free to download and play. However, it may contain some in-app purchases or ads that can enhance your gameplay experience or support the developers.
-
Is ETS2 on mobile safe?
-
Yes, ETS2 on mobile is safe to download and install on your device. It does not contain any viruses or malware that can harm your device or data. However, you should always download it from ets2.mobi or other trusted sources to avoid any risks.
-
Is ETS2 on mobile compatible with my device?
-
ETS2 on mobile is compatible with most Android and iOS devices that have at least 4 GB of RAM and 3 GB of free storage space. However, some devices may have different performance or compatibility issues depending on their specifications or settings.
-
Can I play ETS2 on mobile offline?
-
Yes, you can play ETS2 on mobile offline without an internet connection. However, some features or functions may not work properly or be available offline, such as multiplayer mode, online leaderboards, updates, or downloads. You also need an internet connection to verify your game license and activate it on your device.
-
How can I contact the developers or report a bug?
-
If you have any questions, feedback, suggestions, or issues regarding ETS2 on mobile, you can contact the developers or report a bug through the following channels:
-
-
Email: support@ets2.mobi
-
Facebook: https://www.facebook.com/ets2mobi
-
Twitter: https://twitter.com/ets2mobi
-
Instagram: https://www.instagram.com/ets2mobi
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/2023Liu2023/bingo/src/components/chat-list.tsx b/spaces/2023Liu2023/bingo/src/components/chat-list.tsx
deleted file mode 100644
index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000
--- a/spaces/2023Liu2023/bingo/src/components/chat-list.tsx
+++ /dev/null
@@ -1,28 +0,0 @@
-import React from 'react'
-
-import { Separator } from '@/components/ui/separator'
-import { ChatMessage } from '@/components/chat-message'
-import { ChatMessageModel } from '@/lib/bots/bing/types'
-
-export interface ChatList {
-  messages: ChatMessageModel[]
-}
-
-export function ChatList({ messages }: ChatList) {
-  if (!messages.length) {
-    return null
-  }
-
-  return (
-    <div>
-      {messages.map((message, index) => (
-        <React.Fragment key={index}>
-          <ChatMessage message={message} />
-          {index < messages.length - 1 && <Separator />}
-        </React.Fragment>
-      ))}
-    </div>
-  )
-}
diff --git a/spaces/360macky/first-space/app.py b/spaces/360macky/first-space/app.py
deleted file mode 100644
index b178efdb6a5a27e18fec0525a278bdd2ede2b19c..0000000000000000000000000000000000000000
--- a/spaces/360macky/first-space/app.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import streamlit as st
-
-x = st.slider('Select a value')
-st.write(x, 'squared is', x * x)
-
diff --git a/spaces/4Taps/SadTalker/src/test_audio2coeff.py b/spaces/4Taps/SadTalker/src/test_audio2coeff.py
deleted file mode 100644
index 3db6be3af59b0319c50106d9a92c903118f28410..0000000000000000000000000000000000000000
--- a/spaces/4Taps/SadTalker/src/test_audio2coeff.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import os
-import torch
-import numpy as np
-from scipy.io import savemat
-from yacs.config import CfgNode as CN
-from scipy.signal import savgol_filter
-
-from src.audio2pose_models.audio2pose import Audio2Pose
-from src.audio2exp_models.networks import SimpleWrapperV2
-from src.audio2exp_models.audio2exp import Audio2Exp
-
-def load_cpk(checkpoint_path, model=None, optimizer=None, device="cpu"):
- checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
- if model is not None:
- model.load_state_dict(checkpoint['model'])
- if optimizer is not None:
- optimizer.load_state_dict(checkpoint['optimizer'])
-
- return checkpoint['epoch']
-
-class Audio2Coeff():
-
- def __init__(self, audio2pose_checkpoint, audio2pose_yaml_path,
- audio2exp_checkpoint, audio2exp_yaml_path,
- wav2lip_checkpoint, device):
- #load config
- fcfg_pose = open(audio2pose_yaml_path)
- cfg_pose = CN.load_cfg(fcfg_pose)
- cfg_pose.freeze()
- fcfg_exp = open(audio2exp_yaml_path)
- cfg_exp = CN.load_cfg(fcfg_exp)
- cfg_exp.freeze()
-
- # load audio2pose_model
- self.audio2pose_model = Audio2Pose(cfg_pose, wav2lip_checkpoint, device=device)
- self.audio2pose_model = self.audio2pose_model.to(device)
- self.audio2pose_model.eval()
- for param in self.audio2pose_model.parameters():
- param.requires_grad = False
- try:
- load_cpk(audio2pose_checkpoint, model=self.audio2pose_model, device=device)
- except:
- raise Exception("Failed in loading audio2pose_checkpoint")
-
- # load audio2exp_model
- netG = SimpleWrapperV2()
- netG = netG.to(device)
- for param in netG.parameters():
- netG.requires_grad = False
- netG.eval()
- try:
- load_cpk(audio2exp_checkpoint, model=netG, device=device)
- except:
- raise Exception("Failed in loading audio2exp_checkpoint")
- self.audio2exp_model = Audio2Exp(netG, cfg_exp, device=device, prepare_training_loss=False)
- self.audio2exp_model = self.audio2exp_model.to(device)
- for param in self.audio2exp_model.parameters():
- param.requires_grad = False
- self.audio2exp_model.eval()
-
- self.device = device
-
- def generate(self, batch, coeff_save_dir, pose_style):
-
- with torch.no_grad():
- #test
- results_dict_exp= self.audio2exp_model.test(batch)
- exp_pred = results_dict_exp['exp_coeff_pred'] #bs T 64
-
- #for class_id in range(1):
- #class_id = 0#(i+10)%45
- #class_id = random.randint(0,46) #46 styles can be selected
- batch['class'] = torch.LongTensor([pose_style]).to(self.device)
- results_dict_pose = self.audio2pose_model.test(batch)
- pose_pred = results_dict_pose['pose_pred'] #bs T 6
-
- pose_pred = torch.Tensor(savgol_filter(np.array(pose_pred.cpu()), 13, 2, axis=1)).to(self.device)
- coeffs_pred = torch.cat((exp_pred, pose_pred), dim=-1) #bs T 70
-
- coeffs_pred_numpy = coeffs_pred[0].clone().detach().cpu().numpy()
-
- savemat(os.path.join(coeff_save_dir, '%s##%s.mat'%(batch['pic_name'], batch['audio_name'])),
- {'coeff_3dmm': coeffs_pred_numpy})
-
- return os.path.join(coeff_save_dir, '%s##%s.mat'%(batch['pic_name'], batch['audio_name']))
-
-
diff --git a/spaces/801artistry/RVC801/infer/modules/uvr5/preprocess.py b/spaces/801artistry/RVC801/infer/modules/uvr5/preprocess.py
deleted file mode 100644
index 19f11110ea822eeb140fb885c600536290a1adff..0000000000000000000000000000000000000000
--- a/spaces/801artistry/RVC801/infer/modules/uvr5/preprocess.py
+++ /dev/null
@@ -1,346 +0,0 @@
-import os
-import logging
-
-logger = logging.getLogger(__name__)
-
-import librosa
-import numpy as np
-import soundfile as sf
-import torch
-
-from infer.lib.uvr5_pack.lib_v5 import nets_61968KB as Nets
-from infer.lib.uvr5_pack.lib_v5 import spec_utils
-from infer.lib.uvr5_pack.lib_v5.model_param_init import ModelParameters
-from infer.lib.uvr5_pack.lib_v5.nets_new import CascadedNet
-from infer.lib.uvr5_pack.utils import inference
-
-
-class AudioPre:
- def __init__(self, agg, model_path, device, is_half):
- self.model_path = model_path
- self.device = device
- self.data = {
- # Processing Options
- "postprocess": False,
- "tta": False,
- # Constants
- "window_size": 512,
- "agg": agg,
- "high_end_process": "mirroring",
- }
- mp = ModelParameters("infer/lib/uvr5_pack/lib_v5/modelparams/4band_v2.json")
- model = Nets.CascadedASPPNet(mp.param["bins"] * 2)
- cpk = torch.load(model_path, map_location="cpu")
- model.load_state_dict(cpk)
- model.eval()
- if is_half:
- model = model.half().to(device)
- else:
- model = model.to(device)
-
- self.mp = mp
- self.model = model
-
- def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac"):
- if ins_root is None and vocal_root is None:
- return "No save root."
- name = os.path.basename(music_file)
- if ins_root is not None:
- os.makedirs(ins_root, exist_ok=True)
- if vocal_root is not None:
- os.makedirs(vocal_root, exist_ok=True)
- X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
- bands_n = len(self.mp.param["band"])
- # print(bands_n)
- for d in range(bands_n, 0, -1):
- bp = self.mp.param["band"][d]
- if d == bands_n: # high-end band
- (
- X_wave[d],
- _,
- ) = librosa.core.load( # in theory librosa may hit bugs reading some audio; decoding via ffmpeg would be safer, but that was dropped as too cumbersome
- music_file,
- bp["sr"],
- False,
- dtype=np.float32,
- res_type=bp["res_type"],
- )
- if X_wave[d].ndim == 1:
- X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
- else: # lower bands
- X_wave[d] = librosa.core.resample(
- X_wave[d + 1],
- self.mp.param["band"][d + 1]["sr"],
- bp["sr"],
- res_type=bp["res_type"],
- )
- # Stft of wave source
- X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
- X_wave[d],
- bp["hl"],
- bp["n_fft"],
- self.mp.param["mid_side"],
- self.mp.param["mid_side_b2"],
- self.mp.param["reverse"],
- )
- # pdb.set_trace()
- if d == bands_n and self.data["high_end_process"] != "none":
- input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
- self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
- )
- input_high_end = X_spec_s[d][
- :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
- ]
-
- X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
- aggresive_set = float(self.data["agg"] / 100)
- aggressiveness = {
- "value": aggresive_set,
- "split_bin": self.mp.param["band"][1]["crop_stop"],
- }
- with torch.no_grad():
- pred, X_mag, X_phase = inference(
- X_spec_m, self.device, self.model, aggressiveness, self.data
- )
- # Postprocess
- if self.data["postprocess"]:
- pred_inv = np.clip(X_mag - pred, 0, np.inf)
- pred = spec_utils.mask_silence(pred, pred_inv)
- y_spec_m = pred * X_phase
- v_spec_m = X_spec_m - y_spec_m
-
- if ins_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], y_spec_m, input_high_end, self.mp
- )
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(
- y_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
- logger.info("%s instruments done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- ins_root,
- "instrument_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- ) #
- else:
- path = os.path.join(
- ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
- if vocal_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], v_spec_m, input_high_end, self.mp
- )
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(
- v_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
- logger.info("%s vocals done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- vocal_root,
- "vocal_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- else:
- path = os.path.join(
- vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
-
-
-class AudioPreDeEcho:
- def __init__(self, agg, model_path, device, is_half):
- self.model_path = model_path
- self.device = device
- self.data = {
- # Processing Options
- "postprocess": False,
- "tta": False,
- # Constants
- "window_size": 512,
- "agg": agg,
- "high_end_process": "mirroring",
- }
- mp = ModelParameters("infer/lib/uvr5_pack/lib_v5/modelparams/4band_v3.json")
- nout = 64 if "DeReverb" in model_path else 48
- model = CascadedNet(mp.param["bins"] * 2, nout)
- cpk = torch.load(model_path, map_location="cpu")
- model.load_state_dict(cpk)
- model.eval()
- if is_half:
- model = model.half().to(device)
- else:
- model = model.to(device)
-
- self.mp = mp
- self.model = model
-
- def _path_audio_(
- self, music_file, vocal_root=None, ins_root=None, format="flac"
- ): # for these three VR models the vocal and instrumental outputs are swapped
- if ins_root is None and vocal_root is None:
- return "No save root."
- name = os.path.basename(music_file)
- if ins_root is not None:
- os.makedirs(ins_root, exist_ok=True)
- if vocal_root is not None:
- os.makedirs(vocal_root, exist_ok=True)
- X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}
- bands_n = len(self.mp.param["band"])
- # print(bands_n)
- for d in range(bands_n, 0, -1):
- bp = self.mp.param["band"][d]
- if d == bands_n: # high-end band
- (
- X_wave[d],
- _,
- ) = librosa.core.load( # in theory librosa may hit bugs reading some audio; decoding via ffmpeg would be safer, but that was dropped as too cumbersome
- music_file,
- bp["sr"],
- False,
- dtype=np.float32,
- res_type=bp["res_type"],
- )
- if X_wave[d].ndim == 1:
- X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]])
- else: # lower bands
- X_wave[d] = librosa.core.resample(
- X_wave[d + 1],
- self.mp.param["band"][d + 1]["sr"],
- bp["sr"],
- res_type=bp["res_type"],
- )
- # Stft of wave source
- X_spec_s[d] = spec_utils.wave_to_spectrogram_mt(
- X_wave[d],
- bp["hl"],
- bp["n_fft"],
- self.mp.param["mid_side"],
- self.mp.param["mid_side_b2"],
- self.mp.param["reverse"],
- )
- # pdb.set_trace()
- if d == bands_n and self.data["high_end_process"] != "none":
- input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + (
- self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"]
- )
- input_high_end = X_spec_s[d][
- :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, :
- ]
-
- X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp)
- aggressive_set = float(self.data["agg"] / 100)
- aggressiveness = {
- "value": aggressive_set,
- "split_bin": self.mp.param["band"][1]["crop_stop"],
- }
- with torch.no_grad():
- pred, X_mag, X_phase = inference(
- X_spec_m, self.device, self.model, aggressiveness, self.data
- )
- # Postprocess
- if self.data["postprocess"]:
- pred_inv = np.clip(X_mag - pred, 0, np.inf)
- pred = spec_utils.mask_silence(pred, pred_inv)
- y_spec_m = pred * X_phase
- v_spec_m = X_spec_m - y_spec_m
-
- if ins_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], y_spec_m, input_high_end, self.mp
- )
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(
- y_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp)
- logger.info("%s instruments done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- ins_root,
- "instrument_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- ) #
- else:
- path = os.path.join(
- ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_instrument) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
- if vocal_root is not None:
- if self.data["high_end_process"].startswith("mirroring"):
- input_high_end_ = spec_utils.mirroring(
- self.data["high_end_process"], v_spec_m, input_high_end, self.mp
- )
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(
- v_spec_m, self.mp, input_high_end_h, input_high_end_
- )
- else:
- wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp)
- logger.info("%s vocals done" % name)
- if format in ["wav", "flac"]:
- sf.write(
- os.path.join(
- vocal_root,
- "vocal_{}_{}.{}".format(name, self.data["agg"], format),
- ),
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- else:
- path = os.path.join(
- vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"])
- )
- sf.write(
- path,
- (np.array(wav_vocals) * 32768).astype("int16"),
- self.mp.param["sr"],
- )
- if os.path.exists(path):
- os.system(
- "ffmpeg -i %s -vn %s -q:a 2 -y"
- % (path, path[:-4] + ".%s" % format)
- )
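The export path above writes a temporary WAV and then shells out to ffmpeg with os.system to produce the requested format. As a rough sketch of that re-encode step only (not code from this repo; it swaps os.system for subprocess.run, and the helper name reencode_wav is made up), the same conversion could look like this, with the output options placed before the output path:

```python
import subprocess

def reencode_wav(path: str, fmt: str = "mp3") -> str:
    """Re-encode a .wav file next to itself, mirroring 'ffmpeg -i in.wav -vn out.fmt -q:a 2 -y'."""
    out_path = path[:-4] + f".{fmt}"
    subprocess.run(
        ["ffmpeg", "-i", path, "-vn", "-q:a", "2", "-y", out_path],
        check=True,  # raise if ffmpeg fails instead of silently continuing
    )
    return out_path

# Usage (hypothetical file name): reencode_wav("instrument_song_10.wav", "mp3")
```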
diff --git a/spaces/AIFILMS/generate_human_motion/pyrender/examples/duck.py b/spaces/AIFILMS/generate_human_motion/pyrender/examples/duck.py
deleted file mode 100644
index 9a94bad5bfb30493f7364f2e52cbb4badbccb2c7..0000000000000000000000000000000000000000
--- a/spaces/AIFILMS/generate_human_motion/pyrender/examples/duck.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from pyrender import Mesh, Scene, Viewer
-from io import BytesIO
-import numpy as np
-import trimesh
-import requests
-
-duck_source = "https://github.com/KhronosGroup/glTF-Sample-Models/raw/master/2.0/Duck/glTF-Binary/Duck.glb"
-
-duck = trimesh.load(BytesIO(requests.get(duck_source).content), file_type='glb')
-duckmesh = Mesh.from_trimesh(list(duck.geometry.values())[0])
-scene = Scene(ambient_light=np.array([1.0, 1.0, 1.0, 1.0]))
-scene.add(duckmesh)
-Viewer(scene)
diff --git a/spaces/AIZerotoHero-Health4All/01-Gradio-Speech2Text2Speech-AIPipeline/README.md b/spaces/AIZerotoHero-Health4All/01-Gradio-Speech2Text2Speech-AIPipeline/README.md
deleted file mode 100644
index b98c0cb21bcd18f4bbec2f622d0aa58000bffc8b..0000000000000000000000000000000000000000
--- a/spaces/AIZerotoHero-Health4All/01-Gradio-Speech2Text2Speech-AIPipeline/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: 01🗣️ Gradio NLP Speech 2 Text 2 Speech Generator AI Pipeline 🙉
-emoji: 🗣️🎤🙉
-colorFrom: blue
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.9.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet50_label_smooth.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet50_label_smooth.py
deleted file mode 100644
index b6f793751904658b3e7e01a5ffdaa6b86e156e66..0000000000000000000000000000000000000000
--- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet50_label_smooth.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# model settings
-model = dict(
- type='ImageClassifier',
- backbone=dict(
- type='ResNet',
- depth=50,
- num_stages=4,
- out_indices=(3, ),
- style='pytorch'),
- neck=dict(type='GlobalAveragePooling'),
- head=dict(
- type='LinearClsHead',
- num_classes=1000,
- in_channels=2048,
- loss=dict(
- type='LabelSmoothLoss', label_smooth_val=0.1, loss_weight=1.0),
- topk=(1, 5),
- ))
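The classification head above trains with LabelSmoothLoss (label_smooth_val=0.1). As a minimal sketch of the idea, assuming the standard formulation rather than mmpretrain's exact implementation, label smoothing mixes the one-hot target with a uniform distribution before taking the cross-entropy:

```python
import torch
import torch.nn.functional as F

def label_smooth_ce(logits: torch.Tensor, target: torch.Tensor, eps: float = 0.1) -> torch.Tensor:
    """Cross-entropy against a smoothed target: (1 - eps) * one_hot + eps / num_classes."""
    num_classes = logits.size(-1)
    log_probs = F.log_softmax(logits, dim=-1)
    one_hot = F.one_hot(target, num_classes).float()
    smoothed = (1.0 - eps) * one_hot + eps / num_classes
    return -(smoothed * log_probs).sum(dim=-1).mean()

logits = torch.randn(4, 1000)           # batch of 4, 1000 classes as in the config
target = torch.randint(0, 1000, (4,))
loss = label_smooth_ce(logits, target, eps=0.1)
```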
diff --git a/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/utils/utils.py b/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/utils/utils.py
deleted file mode 100644
index 86e1448d065fa182ca69aae00d2f2a7eea55d8a4..0000000000000000000000000000000000000000
--- a/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/utils/utils.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from concurrent.futures import ProcessPoolExecutor
-from functools import wraps
-import hashlib
-import logging
-import typing as tp
-
-import flashy
-import flashy.distrib
-import omegaconf
-import torch
-from torch.nn.utils.rnn import pad_sequence
-
-
-logger = logging.getLogger(__name__)
-
-
-def dict_from_config(cfg: omegaconf.DictConfig) -> dict:
- """Convenience function to map an omegaconf configuration to a dictionary.
-
- Args:
- cfg (omegaconf.DictConfig): Original configuration to map to dict.
- Returns:
- dict: Config as dictionary object.
- """
- dct = omegaconf.OmegaConf.to_container(cfg, resolve=True)
- assert isinstance(dct, dict)
- return dct
-
-
-def random_subset(dataset, max_samples: int, seed: int = 42) -> torch.utils.data.Subset:
- if max_samples >= len(dataset):
- return dataset
-
- generator = torch.Generator().manual_seed(seed)
- perm = torch.randperm(len(dataset), generator=generator)
- return torch.utils.data.Subset(dataset, perm[:max_samples].tolist())
-
-
-def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int,
- num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader:
- """Convenience function to load dataset into a dataloader with optional subset sampling.
-
- Args:
- dataset: Dataset to load.
- num_samples (Optional[int]): Number of samples to limit subset size.
- batch_size (int): Batch size.
- num_workers (int): Number of workers for data loading.
- seed (int): Random seed.
- """
- if num_samples is not None:
- dataset = random_subset(dataset, num_samples, seed)
-
- dataloader = flashy.distrib.loader(
- dataset,
- batch_size=batch_size,
- num_workers=num_workers,
- **kwargs
- )
- return dataloader
-
-
-def get_dataset_from_loader(dataloader):
- dataset = dataloader.dataset
- if isinstance(dataset, torch.utils.data.Subset):
- return dataset.dataset
- else:
- return dataset
-
-
-def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None):
- """torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension.
-
- Args:
- input (torch.Tensor): The input tensor containing probabilities.
- num_samples (int): Number of samples to draw.
- replacement (bool): Whether to draw with replacement or not.
- Keywords args:
- generator (torch.Generator): A pseudorandom number generator for sampling.
- Returns:
- torch.Tensor: Last dimension contains num_samples indices
- sampled from the multinomial probability distribution
- located in the last dimension of tensor input.
- """
- input_ = input.reshape(-1, input.shape[-1])
- output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator)
- output = output_.reshape(*list(input.shape[:-1]), -1)
- return output
-
-
-def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor:
- """Sample next token from top K values along the last dimension of the input probs tensor.
-
- Args:
- probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
- k (int): The k in “top-k”.
- Returns:
- torch.Tensor: Sampled tokens.
- """
- top_k_value, _ = torch.topk(probs, k, dim=-1)
- min_value_top_k = top_k_value[..., [-1]]
- probs *= (probs >= min_value_top_k).float()
- probs.div_(probs.sum(dim=-1, keepdim=True))
- next_token = multinomial(probs, num_samples=1)
- return next_token
-
-
-def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor:
- """Sample next token from top P probabilities along the last dimension of the input probs tensor.
-
- Args:
- probs (torch.Tensor): Input probabilities with token candidates on the last dimension.
- p (float): The p in “top-p”.
- Returns:
- torch.Tensor: Sampled tokens.
- """
- probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
- probs_sum = torch.cumsum(probs_sort, dim=-1)
- mask = probs_sum - probs_sort > p
- probs_sort *= (~mask).float()
- probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
- next_token = multinomial(probs_sort, num_samples=1)
- next_token = torch.gather(probs_idx, -1, next_token)
- return next_token
-
-
-class DummyPoolExecutor:
- """Dummy pool executor to use when we actually have only 1 worker.
- (e.g. instead of ProcessPoolExecutor).
- """
- class DummyResult:
- def __init__(self, func, *args, **kwargs):
- self.func = func
- self.args = args
- self.kwargs = kwargs
-
- def result(self):
- return self.func(*self.args, **self.kwargs)
-
- def __init__(self, workers, mp_context=None):
- pass
-
- def submit(self, func, *args, **kwargs):
- return DummyPoolExecutor.DummyResult(func, *args, **kwargs)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, exc_tb):
- return
-
-
-def get_pool_executor(num_workers: int, mp_context=None):
- return ProcessPoolExecutor(num_workers, mp_context) if num_workers > 1 else DummyPoolExecutor(1)
-
-
-def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor:
- """Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences).
- For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]
-
- Args:
- lengths (torch.Tensor): tensor with lengths
- max_len (int): can set the max length manually. Defaults to None.
- Returns:
- torch.Tensor: mask with 0s where there is pad tokens else 1s
- """
- assert len(lengths.shape) == 1, "Length shape should be 1 dimensional."
- final_length = lengths.max().item() if not max_len else max_len
- final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor
- return torch.arange(final_length)[None, :].to(lengths.device) < lengths[:, None]
-
-
-def hash_trick(word: str, vocab_size: int) -> int:
- """Hash trick to pair each word with an index
-
- Args:
- word (str): word we wish to convert to an index
- vocab_size (int): size of the vocabulary
- Returns:
- int: index of the word in the embedding LUT
- """
- hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16)
- return hash % vocab_size
-
-
-def with_rank_rng(base_seed: int = 1234):
- """Decorator for a function so that the function will use a Random Number Generator
- whose state depend on the GPU rank. The original RNG state is restored upon returning.
-
- Args:
- base_seed (int): Random seed.
- """
- def _decorator(fun: tp.Callable):
- @wraps(fun)
- def _decorated(*args, **kwargs):
- state = torch.get_rng_state()
- seed = base_seed ^ flashy.distrib.rank()
- torch.manual_seed(seed)
- logger.debug('Rank dependent seed set to %d', seed)
- try:
- return fun(*args, **kwargs)
- finally:
- torch.set_rng_state(state)
- logger.debug('RNG state restored.')
- return _decorated
- return _decorator
-
-
-def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]:
- """Get a list of tensors and collate them to a single tensor. according to the following logic:
- - `dim` specifies the time dimension which will be stacked and padded.
- - The output will contain 1 new dimension (dimension index 0) which will be the size of
- of the original list.
-
- Args:
- tensors (tp.List[torch.Tensor]): List of tensors to collate.
- dim (int): Dimension which will be stacked and padded.
- Returns:
- tp.Tuple[torch.Tensor, torch.Tensor]:
- torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension
- (dimension index 0) which will be the size of the original list.
- torch.Tensor: Tensor containing length of original tensor sizes (without padding).
- """
- tensors = [x.transpose(0, dim) for x in tensors]
- lens = torch.LongTensor([len(x) for x in tensors])
- padded_tensors = pad_sequence(tensors)
- padded_tensors = padded_tensors.transpose(0, 1)
- padded_tensors = padded_tensors.transpose(1, dim + 1)
- return padded_tensors, lens
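The masking and padding helpers at the end of this file are easiest to see on a tiny example. The sketch below is self-contained (it re-derives the same arithmetic rather than importing the deleted module):

```python
import torch
from torch.nn.utils.rnn import pad_sequence

# length_to_mask on lengths [3, 5] -> [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]
lengths = torch.tensor([3, 5])
mask = torch.arange(lengths.max())[None, :] < lengths[:, None]
print(mask.int())

# collate: stack variable-length tensors along a new batch dimension with padding
tensors = [torch.ones(3, 2), torch.ones(5, 2)]          # time dimension is 0
lens = torch.LongTensor([t.shape[0] for t in tensors])  # tensor([3, 5])
padded = pad_sequence(tensors).transpose(0, 1)          # shape [2, 5, 2]
print(padded.shape, lens)
```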
diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/FreeGpt.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/FreeGpt.py
deleted file mode 100644
index 73b8acea41994a4e740791f66d90241fcc5da747..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/FreeGpt.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from __future__ import annotations
-
-import time, hashlib, random
-
-from ..typing import AsyncGenerator
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider
-
-domains = [
- 'https://k.aifree.site',
- 'https://p.aifree.site'
-]
-
-class FreeGpt(AsyncGeneratorProvider):
- url = "https://freegpts1.aifree.site/"
- supports_gpt_35_turbo = True
- working = True
-
- @classmethod
- async def create_async_generator(
- cls,
- model: str,
- messages: list[dict[str, str]],
- timeout: int = 30,
- **kwargs
- ) -> AsyncGenerator:
- async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
- prompt = messages[-1]["content"]
- timestamp = int(time.time())
- data = {
- "messages": messages,
- "time": timestamp,
- "pass": None,
- "sign": generate_signature(timestamp, prompt)
- }
- url = random.choice(domains)
- async with session.post(f"{url}/api/generate", json=data) as response:
- response.raise_for_status()
- async for chunk in response.iter_content():
- yield chunk.decode()
-
- @classmethod
- @property
- def params(cls):
- params = [
- ("model", "str"),
- ("messages", "list[dict[str, str]]"),
- ("stream", "bool"),
- ]
- param = ", ".join([": ".join(p) for p in params])
- return f"g4f.provider.{cls.__name__} supports: ({param})"
-
-def generate_signature(timestamp: int, message: str, secret: str = ""):
- data = f"{timestamp}:{message}:{secret}"
- return hashlib.sha256(data.encode()).hexdigest()
\ No newline at end of file
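The request body above is plain JSON plus a sign field, which is simply a SHA-256 digest of "timestamp:prompt:secret". A standalone sketch of that signing step (the example prompt is made up):

```python
import hashlib
import time

def generate_signature(timestamp: int, message: str, secret: str = "") -> str:
    data = f"{timestamp}:{message}:{secret}"
    return hashlib.sha256(data.encode()).hexdigest()

timestamp = int(time.time())
prompt = "Hello"
payload = {
    "messages": [{"role": "user", "content": prompt}],
    "time": timestamp,
    "pass": None,
    "sign": generate_signature(timestamp, prompt),
}
```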
diff --git a/spaces/AgentVerse/agentVerse/ui/.github/CONTRIBUTING.md b/spaces/AgentVerse/agentVerse/ui/.github/CONTRIBUTING.md
deleted file mode 100644
index 74ce28264311247c50cdeb119e93ad31b7b2799f..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/.github/CONTRIBUTING.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# How to contribute
-
-It's important to us that you feel you can contribute towards the evolution of Phaser. This can take many forms: from helping to fix bugs or improve the docs, to adding in new features to the source. This guide should help you in making that process as smooth as possible.
-
-Before contributing, please read the [code of conduct](https://github.com/photonstorm/phaser/blob/master/.github/CODE_OF_CONDUCT.md).
-
-## Reporting issues
-
-[GitHub Issues][0] is the place to report bugs you may have found. When submitting a bug please do the following:
-
-**1. Search for existing issues.** Your bug may have already been fixed, or cannot, or will not, be fixed. So be sure to search the issues first before putting in a duplicate issue.
-
-**2. Not sure if it's a bug?** Please ask on the [forum][4]. If something is blatantly wrong then post it to GitHub. But if you feel it might just be because you're not sure of the expected behavior, then it might save us time, and get you a response faster, if you post it to the Phaser forum instead.
-
-**3. Create an isolated and reproducible test case.** If you are reporting a bug, make sure you also have a minimal, runnable, code example that reproduces the problem you have.
-
-**4. Include a live example.** After narrowing your code down to only the problem areas, make use of [jsFiddle][1], [jsBin][2], [CodePen][5], or a link to your live site so that we can view a live example of the problem.
-
-**5. Share as much information as possible.** Include browser version affected, your OS, version of the library, steps to reproduce, etc. "X isn't working!!!1!" will probably just be closed.
-
-## Support Forum
-
-We have a very active [Phaser Support Forum][4]. If you need general support, or are struggling to understand how to do something or need your code checked over, then we would urge you to post it to our forum. There are a lot of friendly devs in there who can help, as well as the core Phaser team, so it's a great place to get support. You're welcome to report bugs directly on GitHub, but for general support we'd always recommend using the forum first.
-
-## Making Changes
-
-I'm assuming you already have a recent version of [Node](https://nodejs.org) installed locally and can run `npm`. This guide is tested and works on both Windows 10 and OS X.
-
-### 1. Checkout the repos
-
-Check out both the [Phaser repo](https://github.com/photonstorm/phaser) and the [Phaser 3 Examples Repo](https://github.com/photonstorm/phaser3-examples). Make sure the Phaser 3 Examples repo is saved locally in a folder called `phaser3-examples`, which will be the default for most Git clients.
-
-### 2. Matching Directory Levels
-
-Ensure that both repos live at the same depth in your directory structure. For example: `/usr/home/web/phaser` and `/usr/home/web/phaser3-examples`. This is so the dev build scripts in the Phaser repo can safely copy files to `../phaser3-examples` and have them end up in the correct place.
-
-### 3. Install dependencies
-
-Using your console, run `npm install` or `yarn install` as we've configs for both. This process will install a local copy of webpack and a handful of small support scripts. Note that Yarn on Windows seems to have issues making some packages global, so stick with npm if this is the case.
-
-### 4. Webpack
-
-Making sure you've got both repos checked out, and at the same directory level in your filesystem, issue the command `webpack`. If you can't issue the command then webpack may need [installing globally](https://webpack.js.org/guides/installation/). Webpack will build Phaser and if there are any path errors in the code they'll be flagged during the build process.
-
-What you need is the ability to issue the command `webpack` within the v3 folder and have it work.
-
-### 5. ESLint
-
-There is an ESLint configuration and an Editor Configuration in the v3 folder. **Please adhere to them!** Although not enforced in the build process yet, I will be adding that at a later point. There are lots of tools you can install so your editor of choice will check the ESLint config during development.
-
-To test whether your code passes our lint config, issue the command `npm run lint`.
-
-## Coding style preferences are not contributions
-
-If your PR is doing little more than changing the Phaser source code into a format / coding style that you prefer, then we will automatically close it. All PRs must adhere to the coding style already set out across the thousands of lines of code in Phaser. Your personal preferences for how things should "look" or be structured do not apply here, sorry. PRs should fix bugs, fix documentation, or add features. No changes for the sake of change.
-
-## I don't really like git / node.js, but I can fix this bug
-
-That is fine too. While Pull Requests are the best thing in the world for us, they are not the only way to help. You're welcome to post fixes to our forum or even just email them to us. All we ask is that you still adhere to the guidelines presented here re: ESLint, etc.
-
-## Code Style Guide
-
-We provide an .editorconfig and eslint config for you to use, but generally:
-
-- Use 4 spaces for tabs, never tab characters.
-
-- No trailing whitespace, blank lines should have no whitespace.
-
-- Always favor strict equals `===` unless you *need* to use type coercion.
-
-- Follow conventions already in the code, and listen to eslint. Our config is set-up for a reason.
-
-Thanks to Chad for creating the original Pixi.js Contributing file which we adapted for Phaser.
-
-[0]: https://github.com/photonstorm/phaser/issues
-[1]: http://jsfiddle.net
-[2]: http://jsbin.com/
-[3]: http://nodejs.org
-[4]: https://phaser.discourse.group/
-[5]: https://codepen.io/pen?template=YeEWom "Phaser 3 game template"
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ball/Ball.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ball/Ball.js
deleted file mode 100644
index 1029e82aa2b7b4cb1022b01b94c799c5e7baa8cb..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ball/Ball.js
+++ /dev/null
@@ -1,45 +0,0 @@
-import Base from '../base/Base.js';
-import { Circle } from '../utils/Geoms.js'
-import Yoyo from '../utils/Yoyo.js';
-
-const Linear = Phaser.Math.Linear;
-
-class Ball extends Base {
- constructor(scene, config) {
- super(scene, config);
- this.type = 'rexSpinnerBall';
- }
-
- buildShapes() {
- for (var i = 0; i < 3; i++) {
- this.addShape(new Circle());
- }
- }
-
- updateShapes() {
- var centerX = this.centerX;
- var centerY = this.centerY;
- var radius = this.radius;
- var ballRadius = radius * 0.1;
- var lineWidth = Math.ceil(ballRadius * 0.25);
-
- var t = 1 - Yoyo(this.value);
- var trackRadius = Linear(0.3, 0.9, t) * radius;
-
- var shapes = this.getShapes();
- for (var i = 0, cnt = shapes.length; i < cnt; i++) {
- var ball = shapes[i];
- var t = (this.value + (i / cnt)) % 1;
- var angle = Math.PI * 2 * t;
- ball
- .lineStyle(lineWidth, this.color)
- .setRadius(ballRadius)
- .setCenterPosition(
- centerX + Math.cos(angle) * trackRadius,
- centerY + Math.sin(angle) * trackRadius
- );
- }
- }
-}
-
-export default Ball;
\ No newline at end of file
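The geometry in updateShapes is compact: each of the three balls sits on a circle whose radius breathes between 0.3·r and 0.9·r as this.value runs from 0 to 1 and back. A quick Python sketch of the same placement math, assuming Yoyo is the usual triangle easing that maps 0→0, 0.5→1, 1→0 (an assumption about utils/Yoyo.js, not taken from this repo):

```python
import math

def yoyo(t: float) -> float:
    # Assumed triangle easing: 0 -> 0, 0.5 -> 1, 1 -> 0
    return 1.0 - abs(2.0 * t - 1.0)

def ball_positions(value: float, center_x: float, center_y: float, radius: float, count: int = 3):
    t = 1.0 - yoyo(value)
    track_radius = (0.3 + (0.9 - 0.3) * t) * radius   # Linear(0.3, 0.9, t) * radius
    positions = []
    for i in range(count):
        angle = 2.0 * math.pi * ((value + i / count) % 1.0)
        positions.append((center_x + math.cos(angle) * track_radius,
                          center_y + math.sin(angle) * track_radius))
    return positions

print(ball_positions(0.25, 100.0, 100.0, 50.0))
```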
diff --git a/spaces/Alpaca233/SadTalker/src/generate_facerender_batch.py b/spaces/Alpaca233/SadTalker/src/generate_facerender_batch.py
deleted file mode 100644
index a62b6edffa41529ba828905fb86ca302a01d37cc..0000000000000000000000000000000000000000
--- a/spaces/Alpaca233/SadTalker/src/generate_facerender_batch.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import os
-import numpy as np
-from PIL import Image
-from skimage import io, img_as_float32, transform
-import torch
-import scipy.io as scio
-
-def get_facerender_data(coeff_path, pic_path, first_coeff_path, audio_path,
- batch_size, input_yaw_list=None, input_pitch_list=None, input_roll_list=None,
- expression_scale=1.0, still_mode = False, preprocess='crop', size = 256):
-
- semantic_radius = 13
- video_name = os.path.splitext(os.path.split(coeff_path)[-1])[0]
- txt_path = os.path.splitext(coeff_path)[0]
-
- data={}
-
- img1 = Image.open(pic_path)
- source_image = np.array(img1)
- source_image = img_as_float32(source_image)
- source_image = transform.resize(source_image, (size, size, 3))
- source_image = source_image.transpose((2, 0, 1))
- source_image_ts = torch.FloatTensor(source_image).unsqueeze(0)
- source_image_ts = source_image_ts.repeat(batch_size, 1, 1, 1)
- data['source_image'] = source_image_ts
-
- source_semantics_dict = scio.loadmat(first_coeff_path)
- generated_dict = scio.loadmat(coeff_path)
-
- if 'full' not in preprocess.lower():
- source_semantics = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70
- generated_3dmm = generated_dict['coeff_3dmm'][:,:70]
-
- else:
- source_semantics = source_semantics_dict['coeff_3dmm'][:1,:73] #1 73
- generated_3dmm = generated_dict['coeff_3dmm'][:,:70]
-
- source_semantics_new = transform_semantic_1(source_semantics, semantic_radius)
- source_semantics_ts = torch.FloatTensor(source_semantics_new).unsqueeze(0)
- source_semantics_ts = source_semantics_ts.repeat(batch_size, 1, 1)
- data['source_semantics'] = source_semantics_ts
-
- # target
- generated_3dmm[:, :64] = generated_3dmm[:, :64] * expression_scale
-
- if 'full' in preprocess.lower():
- generated_3dmm = np.concatenate([generated_3dmm, np.repeat(source_semantics[:,70:], generated_3dmm.shape[0], axis=0)], axis=1)
-
- if still_mode:
- generated_3dmm[:, 64:] = np.repeat(source_semantics[:, 64:], generated_3dmm.shape[0], axis=0)
-
- with open(txt_path+'.txt', 'w') as f:
- for coeff in generated_3dmm:
- for i in coeff:
- f.write(str(i)[:7] + ' '+'\t')
- f.write('\n')
-
- target_semantics_list = []
- frame_num = generated_3dmm.shape[0]
- data['frame_num'] = frame_num
- for frame_idx in range(frame_num):
- target_semantics = transform_semantic_target(generated_3dmm, frame_idx, semantic_radius)
- target_semantics_list.append(target_semantics)
-
- remainder = frame_num%batch_size
- if remainder!=0:
- for _ in range(batch_size-remainder):
- target_semantics_list.append(target_semantics)
-
- target_semantics_np = np.array(target_semantics_list) #frame_num 70 semantic_radius*2+1
- target_semantics_np = target_semantics_np.reshape(batch_size, -1, target_semantics_np.shape[-2], target_semantics_np.shape[-1])
- data['target_semantics_list'] = torch.FloatTensor(target_semantics_np)
- data['video_name'] = video_name
- data['audio_path'] = audio_path
-
- if input_yaw_list is not None:
- yaw_c_seq = gen_camera_pose(input_yaw_list, frame_num, batch_size)
- data['yaw_c_seq'] = torch.FloatTensor(yaw_c_seq)
- if input_pitch_list is not None:
- pitch_c_seq = gen_camera_pose(input_pitch_list, frame_num, batch_size)
- data['pitch_c_seq'] = torch.FloatTensor(pitch_c_seq)
- if input_roll_list is not None:
- roll_c_seq = gen_camera_pose(input_roll_list, frame_num, batch_size)
- data['roll_c_seq'] = torch.FloatTensor(roll_c_seq)
-
- return data
-
-def transform_semantic_1(semantic, semantic_radius):
- semantic_list = [semantic for i in range(0, semantic_radius*2+1)]
- coeff_3dmm = np.concatenate(semantic_list, 0)
- return coeff_3dmm.transpose(1,0)
-
-def transform_semantic_target(coeff_3dmm, frame_index, semantic_radius):
- num_frames = coeff_3dmm.shape[0]
- seq = list(range(frame_index- semantic_radius, frame_index + semantic_radius+1))
- index = [ min(max(item, 0), num_frames-1) for item in seq ]
- coeff_3dmm_g = coeff_3dmm[index, :]
- return coeff_3dmm_g.transpose(1,0)
-
-def gen_camera_pose(camera_degree_list, frame_num, batch_size):
-
- new_degree_list = []
- if len(camera_degree_list) == 1:
- for _ in range(frame_num):
- new_degree_list.append(camera_degree_list[0])
- remainder = frame_num%batch_size
- if remainder!=0:
- for _ in range(batch_size-remainder):
- new_degree_list.append(new_degree_list[-1])
- new_degree_np = np.array(new_degree_list).reshape(batch_size, -1)
- return new_degree_np
-
- degree_sum = 0.
- for i, degree in enumerate(camera_degree_list[1:]):
- degree_sum += abs(degree-camera_degree_list[i])
-
- degree_per_frame = degree_sum/(frame_num-1)
- for i, degree in enumerate(camera_degree_list[1:]):
- degree_last = camera_degree_list[i]
- degree_step = degree_per_frame * abs(degree-degree_last)/(degree-degree_last)
- new_degree_list = new_degree_list + list(np.arange(degree_last, degree, degree_step))
- if len(new_degree_list) > frame_num:
- new_degree_list = new_degree_list[:frame_num]
- elif len(new_degree_list) < frame_num:
- for _ in range(frame_num-len(new_degree_list)):
- new_degree_list.append(new_degree_list[-1])
- print(len(new_degree_list))
- print(frame_num)
-
- remainder = frame_num%batch_size
- if remainder!=0:
- for _ in range(batch_size-remainder):
- new_degree_list.append(new_degree_list[-1])
- new_degree_np = np.array(new_degree_list).reshape(batch_size, -1)
- return new_degree_np
-
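transform_semantic_target above gathers, for every frame, a window of 2*semantic_radius + 1 neighbouring coefficient rows, clamping indices at the clip boundaries. A small self-contained sketch of that index logic (random data; the helper name window_indices is made up for illustration):

```python
import numpy as np

def window_indices(frame_index: int, num_frames: int, semantic_radius: int = 13):
    seq = range(frame_index - semantic_radius, frame_index + semantic_radius + 1)
    return [min(max(i, 0), num_frames - 1) for i in seq]

coeff_3dmm = np.random.randn(40, 70)                 # 40 frames, 70 coefficients
idx = window_indices(frame_index=0, num_frames=40)   # leading indices are clamped to 0
target = coeff_3dmm[idx, :].transpose(1, 0)          # shape (70, 27), matching the code above
print(idx[:5], target.shape)
```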
diff --git a/spaces/Amrrs/DragGan-Inversion/training/networks_stylegan2.py b/spaces/Amrrs/DragGan-Inversion/training/networks_stylegan2.py
deleted file mode 100644
index 6f570aad058ae63aaaa6733504d0d5ed4ba190a1..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/training/networks_stylegan2.py
+++ /dev/null
@@ -1,981 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Network architectures from the paper
-"Analyzing and Improving the Image Quality of StyleGAN".
-Matches the original implementation of configs E-F by Karras et al. at
-https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py"""
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from torch_utils import misc
-from torch_utils import persistence
-from torch_utils.ops import conv2d_resample
-from torch_utils.ops import upfirdn2d
-from torch_utils.ops import bias_act
-from torch_utils.ops import fma
-
-# ----------------------------------------------------------------------------
-
-
-@misc.profiled_function
-def normalize_2nd_moment(x, dim=1, eps=1e-8):
- return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt()
-
-# ----------------------------------------------------------------------------
-
-
-@misc.profiled_function
-def modulated_conv2d(
- # Input tensor of shape [batch_size, in_channels, in_height, in_width].
- x,
- # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
- weight,
- # Modulation coefficients of shape [batch_size, in_channels].
- styles,
- noise=None, # Optional noise tensor to add to the output activations.
- up=1, # Integer upsampling factor.
- down=1, # Integer downsampling factor.
- padding=0, # Padding with respect to the upsampled image.
- # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
- resample_filter=None,
- demodulate=True, # Apply weight demodulation?
- # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
- flip_weight=True,
- # Perform modulation, convolution, and demodulation as a single fused operation?
- fused_modconv=True,
-):
- batch_size = x.shape[0]
- out_channels, in_channels, kh, kw = weight.shape
- misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
- misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
- misc.assert_shape(styles, [batch_size, in_channels]) # [NI]
-
- # Pre-normalize inputs to avoid FP16 overflow.
- if x.dtype == torch.float16 and demodulate:
- weight = weight * (1 / np.sqrt(in_channels * kh * kw) /
- weight.norm(float('inf'), dim=[1, 2, 3], keepdim=True)) # max_Ikk
- styles = styles / \
- styles.norm(float('inf'), dim=1, keepdim=True) # max_I
-
- # Calculate per-sample weights and demodulation coefficients.
- w = None
- dcoefs = None
- if demodulate or fused_modconv:
- w = weight.unsqueeze(0) # [NOIkk]
- w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
- if demodulate:
- dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt() # [NO]
- if demodulate and fused_modconv:
- w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]
-
- # Execute by scaling the activations before and after the convolution.
- if not fused_modconv:
- x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
- x = conv2d_resample.conv2d_resample(x=x, w=weight.to(
- x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
- if demodulate and noise is not None:
- x = fma.fma(x, dcoefs.to(x.dtype).reshape(
- batch_size, -1, 1, 1), noise.to(x.dtype))
- elif demodulate:
- x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
- elif noise is not None:
- x = x.add_(noise.to(x.dtype))
- return x
-
- # Execute as one fused op using grouped convolution.
- with misc.suppress_tracer_warnings(): # this value will be treated as a constant
- batch_size = int(batch_size)
- misc.assert_shape(x, [batch_size, in_channels, None, None])
- x = x.reshape(1, -1, *x.shape[2:])
- w = w.reshape(-1, in_channels, kh, kw)
- x = conv2d_resample.conv2d_resample(x=x, w=w.to(
- x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
- x = x.reshape(batch_size, -1, *x.shape[2:])
- if noise is not None:
- x = x.add_(noise)
- return x
-
-# ----------------------------------------------------------------------------
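In the fused path above, modulated_conv2d boils down to three steps: scale the shared weight per sample by the styles, renormalize each output filter (demodulation), then run one grouped convolution. The sketch below isolates just the weight preparation, following the [NOIkk] shape comments; it is an illustration, not a drop-in replacement for the function above:

```python
import torch

def modulate_demodulate(weight: torch.Tensor, styles: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    """weight: [O, I, k, k], styles: [N, I] -> per-sample weights of shape [N, O, I, k, k]."""
    w = weight.unsqueeze(0)                                   # [1, O, I, k, k]
    w = w * styles.reshape(styles.shape[0], 1, -1, 1, 1)      # modulate:   [N, O, I, k, k]
    dcoefs = (w.square().sum(dim=[2, 3, 4]) + eps).rsqrt()    # demodulate: [N, O]
    return w * dcoefs.reshape(styles.shape[0], -1, 1, 1, 1)

weight = torch.randn(64, 32, 3, 3)
styles = torch.randn(4, 32)
w = modulate_demodulate(weight, styles)   # torch.Size([4, 64, 32, 3, 3])
```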
-
-
-@persistence.persistent_class
-class FullyConnectedLayer(torch.nn.Module):
- def __init__(self,
- in_features, # Number of input features.
- out_features, # Number of output features.
- bias=True, # Apply additive bias before the activation function?
- # Activation function: 'relu', 'lrelu', etc.
- activation='linear',
- lr_multiplier=1, # Learning rate multiplier.
- bias_init=0, # Initial value for the additive bias.
- ):
- super().__init__()
- self.in_features = in_features
- self.out_features = out_features
- self.activation = activation
- self.weight = torch.nn.Parameter(torch.randn(
- [out_features, in_features]) / lr_multiplier)
- self.bias = torch.nn.Parameter(torch.full(
- [out_features], np.float32(bias_init))) if bias else None
- self.weight_gain = lr_multiplier / np.sqrt(in_features)
- self.bias_gain = lr_multiplier
-
- def forward(self, x):
- w = self.weight.to(x.dtype) * self.weight_gain
- b = self.bias
- if b is not None:
- b = b.to(x.dtype)
- if self.bias_gain != 1:
- b = b * self.bias_gain
-
- if self.activation == 'linear' and b is not None:
- x = torch.addmm(b.unsqueeze(0), x, w.t())
- else:
- x = x.matmul(w.t())
- x = bias_act.bias_act(x, b, act=self.activation)
- return x
-
- def extra_repr(self):
- return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'
-
-# ----------------------------------------------------------------------------
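FullyConnectedLayer keeps the raw parameters at a fixed scale and applies its gains at runtime (the equalized-learning-rate trick): the weight is multiplied by lr_multiplier / sqrt(in_features) and the bias by lr_multiplier on every forward pass. A bare-bones sketch of that forward path for the linear-activation case (an illustration of the arithmetic, not the class itself):

```python
import numpy as np
import torch

in_features, out_features, lr_multiplier = 512, 512, 0.01
# Dividing the init by lr_multiplier makes weight * weight_gain start at ~N(0, 1/in_features)
weight = torch.randn(out_features, in_features) / lr_multiplier
bias = torch.zeros(out_features)
weight_gain = lr_multiplier / np.sqrt(in_features)
bias_gain = lr_multiplier

x = torch.randn(4, in_features)
y = torch.addmm((bias * bias_gain).unsqueeze(0), x, (weight * weight_gain).t())
```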
-
-
-@persistence.persistent_class
-class Conv2dLayer(torch.nn.Module):
- def __init__(self,
- in_channels, # Number of input channels.
- out_channels, # Number of output channels.
- # Width and height of the convolution kernel.
- kernel_size,
- bias=True, # Apply additive bias before the activation function?
- # Activation function: 'relu', 'lrelu', etc.
- activation='linear',
- up=1, # Integer upsampling factor.
- down=1, # Integer downsampling factor.
- # Low-pass filter to apply when resampling activations.
- resample_filter=[1, 3, 3, 1],
- # Clamp the output to +-X, None = disable clamping.
- conv_clamp=None,
- channels_last=False, # Expect the input to have memory_format=channels_last?
- trainable=True, # Update the weights of this layer during training?
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.activation = activation
- self.up = up
- self.down = down
- self.conv_clamp = conv_clamp
- self.register_buffer(
- 'resample_filter', upfirdn2d.setup_filter(resample_filter))
- self.padding = kernel_size // 2
- self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
- self.act_gain = bias_act.activation_funcs[activation].def_gain
-
- memory_format = torch.channels_last if channels_last else torch.contiguous_format
- weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(
- memory_format=memory_format)
- bias = torch.zeros([out_channels]) if bias else None
- if trainable:
- self.weight = torch.nn.Parameter(weight)
- self.bias = torch.nn.Parameter(bias) if bias is not None else None
- else:
- self.register_buffer('weight', weight)
- if bias is not None:
- self.register_buffer('bias', bias)
- else:
- self.bias = None
-
- def forward(self, x, gain=1):
- w = self.weight * self.weight_gain
- b = self.bias.to(x.dtype) if self.bias is not None else None
- flip_weight = (self.up == 1) # slightly faster
- x = conv2d_resample.conv2d_resample(x=x, w=w.to(
- x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight)
-
- act_gain = self.act_gain * gain
- act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
- x = bias_act.bias_act(x, b, act=self.activation,
- gain=act_gain, clamp=act_clamp)
- return x
-
- def extra_repr(self):
- return ' '.join([
- f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},',
- f'up={self.up}, down={self.down}'])
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class MappingNetwork(torch.nn.Module):
- def __init__(self,
- # Input latent (Z) dimensionality, 0 = no latent.
- z_dim,
- # Conditioning label (C) dimensionality, 0 = no label.
- c_dim,
- # Intermediate latent (W) dimensionality.
- w_dim,
- # Number of intermediate latents to output, None = do not broadcast.
- num_ws,
- num_layers=8, # Number of mapping layers.
- # Label embedding dimensionality, None = same as w_dim.
- embed_features=None,
- # Number of intermediate features in the mapping layers, None = same as w_dim.
- layer_features=None,
- # Activation function: 'relu', 'lrelu', etc.
- activation='lrelu',
- # Learning rate multiplier for the mapping layers.
- lr_multiplier=0.01,
- # Decay for tracking the moving average of W during training, None = do not track.
- w_avg_beta=0.998,
- ):
- super().__init__()
- self.z_dim = z_dim
- self.c_dim = c_dim
- self.w_dim = w_dim
- self.num_ws = num_ws
- self.num_layers = num_layers
- self.w_avg_beta = w_avg_beta
-
- if embed_features is None:
- embed_features = w_dim
- if c_dim == 0:
- embed_features = 0
- if layer_features is None:
- layer_features = w_dim
- features_list = [z_dim + embed_features] + \
- [layer_features] * (num_layers - 1) + [w_dim]
-
- if c_dim > 0:
- self.embed = FullyConnectedLayer(c_dim, embed_features)
- for idx in range(num_layers):
- in_features = features_list[idx]
- out_features = features_list[idx + 1]
- layer = FullyConnectedLayer(
- in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
- setattr(self, f'fc{idx}', layer)
-
- if num_ws is not None and w_avg_beta is not None:
- self.register_buffer('w_avg', torch.zeros([w_dim]))
-
- def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
- # Embed, normalize, and concat inputs.
- x = None
- with torch.autograd.profiler.record_function('input'):
- if self.z_dim > 0:
- misc.assert_shape(z, [None, self.z_dim])
- x = normalize_2nd_moment(z.to(torch.float32))
- if self.c_dim > 0:
- misc.assert_shape(c, [None, self.c_dim])
- y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
- x = torch.cat([x, y], dim=1) if x is not None else y
-
- # Main layers.
- for idx in range(self.num_layers):
- layer = getattr(self, f'fc{idx}')
- x = layer(x)
-
- # Update moving average of W.
- if update_emas and self.w_avg_beta is not None:
- with torch.autograd.profiler.record_function('update_w_avg'):
- self.w_avg.copy_(x.detach().mean(
- dim=0).lerp(self.w_avg, self.w_avg_beta))
-
- # Broadcast.
- if self.num_ws is not None:
- with torch.autograd.profiler.record_function('broadcast'):
- x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
-
- # Apply truncation.
- if truncation_psi != 1:
- with torch.autograd.profiler.record_function('truncate'):
- assert self.w_avg_beta is not None
- if self.num_ws is None or truncation_cutoff is None:
- x = self.w_avg.lerp(x, truncation_psi)
- else:
- x[:, :truncation_cutoff] = self.w_avg.lerp(
- x[:, :truncation_cutoff], truncation_psi)
- return x
-
- def extra_repr(self):
- return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}'
-
-# ----------------------------------------------------------------------------
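The truncation step at the end of MappingNetwork.forward is just a lerp of each w towards the tracked average w_avg; psi = 1 keeps w unchanged and psi = 0 collapses it to the average. A tiny sketch of the cutoff branch (shapes and values chosen for illustration):

```python
import torch

w_avg = torch.zeros(512)                  # tracked moving average of W
w = torch.randn(4, 14, 512)               # [batch, num_ws, w_dim]
truncation_psi, truncation_cutoff = 0.7, 8

w_trunc = w.clone()
w_trunc[:, :truncation_cutoff] = w_avg.lerp(w[:, :truncation_cutoff], truncation_psi)
# layers at index >= truncation_cutoff keep their untruncated w
```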
-
-
-@persistence.persistent_class
-class SynthesisLayer(torch.nn.Module):
- def __init__(self,
- in_channels, # Number of input channels.
- out_channels, # Number of output channels.
- # Intermediate latent (W) dimensionality.
- w_dim,
- resolution, # Resolution of this layer.
- kernel_size=3, # Convolution kernel size.
- up=1, # Integer upsampling factor.
- use_noise=True, # Enable noise input?
- # Activation function: 'relu', 'lrelu', etc.
- activation='lrelu',
- # Low-pass filter to apply when resampling activations.
- resample_filter=[1, 3, 3, 1],
- # Clamp the output of convolution layers to +-X, None = disable clamping.
- conv_clamp=None,
- channels_last=False, # Use channels_last format for the weights?
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.w_dim = w_dim
- self.resolution = resolution
- self.up = up
- self.use_noise = use_noise
- self.activation = activation
- self.conv_clamp = conv_clamp
- self.register_buffer(
- 'resample_filter', upfirdn2d.setup_filter(resample_filter))
- self.padding = kernel_size // 2
- self.act_gain = bias_act.activation_funcs[activation].def_gain
-
- self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
- memory_format = torch.channels_last if channels_last else torch.contiguous_format
- self.weight = torch.nn.Parameter(torch.randn(
- [out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
- if use_noise:
- self.register_buffer(
- 'noise_const', torch.randn([resolution, resolution]))
- self.noise_strength = torch.nn.Parameter(torch.zeros([]))
- self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
-
- def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
- assert noise_mode in ['random', 'const', 'none']
- in_resolution = self.resolution // self.up
- misc.assert_shape(x, [None, self.in_channels,
- in_resolution, in_resolution])
- styles = self.affine(w)
-
- noise = None
- if self.use_noise and noise_mode == 'random':
- noise = torch.randn([x.shape[0], 1, self.resolution,
- self.resolution], device=x.device) * self.noise_strength
- if self.use_noise and noise_mode == 'const':
- noise = self.noise_const * self.noise_strength
-
- flip_weight = (self.up == 1) # slightly faster
- x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
- padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv)
-
- act_gain = self.act_gain * gain
- act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
- x = bias_act.bias_act(x, self.bias.to(
- x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp)
- return x
-
- def extra_repr(self):
- return ' '.join([
- f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},',
- f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}'])
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class ToRGBLayer(torch.nn.Module):
- def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.w_dim = w_dim
- self.conv_clamp = conv_clamp
- self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
- memory_format = torch.channels_last if channels_last else torch.contiguous_format
- self.weight = torch.nn.Parameter(torch.randn(
- [out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format))
- self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
- self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
-
- def forward(self, x, w, fused_modconv=True):
- styles = self.affine(w) * self.weight_gain
- x = modulated_conv2d(x=x, weight=self.weight, styles=styles,
- demodulate=False, fused_modconv=fused_modconv)
- x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp)
- return x
-
- def extra_repr(self):
- return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}'
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class SynthesisBlock(torch.nn.Module):
- def __init__(self,
- # Number of input channels, 0 = first block.
- in_channels,
- # Number of output channels.
- out_channels,
- # Intermediate latent (W) dimensionality.
- w_dim,
- # Resolution of this block.
- resolution,
- # Number of output color channels.
- img_channels,
- is_last, # Is this the last block?
- # Architecture: 'orig', 'skip', 'resnet'.
- architecture='skip',
- # Low-pass filter to apply when resampling activations.
- resample_filter=[1, 3, 3, 1],
- # Clamp the output of convolution layers to +-X, None = disable clamping.
- conv_clamp=256,
- use_fp16=False, # Use FP16 for this block?
- fp16_channels_last=False, # Use channels-last memory format with FP16?
- # Default value of fused_modconv. 'inference_only' = True for inference, False for training.
- fused_modconv_default=True,
- # Arguments for SynthesisLayer.
- **layer_kwargs,
- ):
- assert architecture in ['orig', 'skip', 'resnet']
- super().__init__()
- self.in_channels = in_channels
- self.w_dim = w_dim
- self.resolution = resolution
- self.img_channels = img_channels
- self.is_last = is_last
- self.architecture = architecture
- self.use_fp16 = use_fp16
- self.channels_last = (use_fp16 and fp16_channels_last)
- self.fused_modconv_default = fused_modconv_default
- self.register_buffer(
- 'resample_filter', upfirdn2d.setup_filter(resample_filter))
- self.num_conv = 0
- self.num_torgb = 0
-
- if in_channels == 0:
- self.const = torch.nn.Parameter(torch.randn(
- [out_channels, resolution, resolution]))
-
- if in_channels != 0:
- self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
- resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
- self.num_conv += 1
-
- self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
- conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
- self.num_conv += 1
-
- if is_last or architecture == 'skip':
- self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
- conv_clamp=conv_clamp, channels_last=self.channels_last)
- self.num_torgb += 1
-
- if in_channels != 0 and architecture == 'resnet':
- self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
- resample_filter=resample_filter, channels_last=self.channels_last)
-
- def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs):
- _ = update_emas # unused
- misc.assert_shape(
- ws, [None, self.num_conv + self.num_torgb, self.w_dim])
- w_iter = iter(ws.unbind(dim=1))
- if ws.device.type != 'cuda':
- force_fp32 = True
- dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
- memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
- if fused_modconv is None:
- fused_modconv = self.fused_modconv_default
- if fused_modconv == 'inference_only':
- fused_modconv = (not self.training)
-
- # Input.
- if self.in_channels == 0:
- x = self.const.to(dtype=dtype, memory_format=memory_format)
- x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
- else:
- misc.assert_shape(x, [None, self.in_channels,
- self.resolution // 2, self.resolution // 2])
- x = x.to(dtype=dtype, memory_format=memory_format)
-
- # Main layers.
- if self.in_channels == 0:
- x = self.conv1(x, next(w_iter),
- fused_modconv=fused_modconv, **layer_kwargs)
- elif self.architecture == 'resnet':
- y = self.skip(x, gain=np.sqrt(0.5))
- x = self.conv0(x, next(w_iter),
- fused_modconv=fused_modconv, **layer_kwargs)
- x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv,
- gain=np.sqrt(0.5), **layer_kwargs)
- x = y.add_(x)
- else:
- x = self.conv0(x, next(w_iter),
- fused_modconv=fused_modconv, **layer_kwargs)
- x = self.conv1(x, next(w_iter),
- fused_modconv=fused_modconv, **layer_kwargs)
-
- # ToRGB.
- if img is not None:
- misc.assert_shape(
- img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
- img = upfirdn2d.upsample2d(img, self.resample_filter)
- if self.is_last or self.architecture == 'skip':
- y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
- y = y.to(dtype=torch.float32,
- memory_format=torch.contiguous_format)
- img = img.add_(y) if img is not None else y
-
- assert x.dtype == dtype
- assert img is None or img.dtype == torch.float32
- return x, img
-
- def extra_repr(self):
- return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class SynthesisNetwork(torch.nn.Module):
- def __init__(self,
- # Intermediate latent (W) dimensionality.
- w_dim,
- img_resolution, # Output image resolution.
- img_channels, # Number of color channels.
- # Overall multiplier for the number of channels.
- channel_base=32768,
- # Maximum number of channels in any layer.
- channel_max=512,
- # Use FP16 for the N highest resolutions.
- num_fp16_res=4,
- **block_kwargs, # Arguments for SynthesisBlock.
- ):
- assert img_resolution >= 4 and img_resolution & (
- img_resolution - 1) == 0
- super().__init__()
- self.w_dim = w_dim
- self.img_resolution = img_resolution
- self.img_resolution_log2 = int(np.log2(img_resolution))
- self.img_channels = img_channels
- self.num_fp16_res = num_fp16_res
- self.block_resolutions = [
- 2 ** i for i in range(2, self.img_resolution_log2 + 1)]
- channels_dict = {res: min(channel_base // res, channel_max)
- for res in self.block_resolutions}
- fp16_resolution = max(
- 2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
-
- self.num_ws = 0
- for res in self.block_resolutions:
- in_channels = channels_dict[res // 2] if res > 4 else 0
- out_channels = channels_dict[res]
- use_fp16 = (res >= fp16_resolution)
- is_last = (res == self.img_resolution)
- block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res,
- img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs)
- self.num_ws += block.num_conv
- if is_last:
- self.num_ws += block.num_torgb
- setattr(self, f'b{res}', block)
-
- def forward(self, ws, return_feature=False, **block_kwargs):
- block_ws = []
- features = []
- with torch.autograd.profiler.record_function('split_ws'):
- misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
- ws = ws.to(torch.float32)
- w_idx = 0
- for res in self.block_resolutions:
- block = getattr(self, f'b{res}')
- block_ws.append(
- ws.narrow(1, w_idx, block.num_conv + block.num_torgb))
- w_idx += block.num_conv
-
- x = img = None
- for res, cur_ws in zip(self.block_resolutions, block_ws):
- block = getattr(self, f'b{res}')
- x, img = block(x, img, cur_ws, **block_kwargs)
- features.append(x)
- if return_feature:
- return img, features
- else:
- return img
-
- def extra_repr(self):
- return ' '.join([
- f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
- f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
- f'num_fp16_res={self.num_fp16_res:d}'])
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class Generator(torch.nn.Module):
- def __init__(self,
- z_dim, # Input latent (Z) dimensionality.
- # Conditioning label (C) dimensionality.
- c_dim,
- # Intermediate latent (W) dimensionality.
- w_dim,
- img_resolution, # Output resolution.
- img_channels, # Number of output color channels.
- mapping_kwargs={}, # Arguments for MappingNetwork.
- synthesis_kwargs={}, # Arguments for SynthesisNetwork.
- resize=None,
- **synthesis_kwargs2, # Arguments for SynthesisNetwork.
- ):
- super().__init__()
- self.z_dim = z_dim
- self.c_dim = c_dim
- self.w_dim = w_dim
- self.img_resolution = img_resolution
- self.img_channels = img_channels
- if len(synthesis_kwargs) == 0:
- synthesis_kwargs = synthesis_kwargs2
- self.synthesis = SynthesisNetwork(
- w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
- self.num_ws = self.synthesis.num_ws
- self.mapping = MappingNetwork(
- z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
- self.resize = resize
-
- def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, input_is_w=False, return_feature=False, **synthesis_kwargs):
- if input_is_w:
- ws = z
- if ws.dim() == 2:
- ws = ws.unsqueeze(1).repeat([1, self.mapping.num_ws, 1])
- else:
- ws = self.mapping(z, c, truncation_psi=truncation_psi,
- truncation_cutoff=truncation_cutoff, update_emas=update_emas)
- img = self.synthesis(ws, update_emas=update_emas,
- return_feature=return_feature, **synthesis_kwargs)
- if return_feature:
- img, feature = img
- if self.resize is not None:
- img = imresize(img, [self.resize, self.resize])
- if return_feature:
- return img, feature
- else:
- return img
-
-
-def imresize(image, size):
- dim = image.dim()
- if dim == 3:
- image = image.unsqueeze(1)
- b, _, h, w = image.shape
- if size[0] > h:
- image = F.interpolate(image, size, mode='bilinear')
- elif size[0] < h:
- image = F.interpolate(image, size, mode='area')
- if dim == 3:
- image = image.squeeze(1)
- return image
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class DiscriminatorBlock(torch.nn.Module):
- def __init__(self,
- # Number of input channels, 0 = first block.
- in_channels,
- # Number of intermediate channels.
- tmp_channels,
- # Number of output channels.
- out_channels,
- # Resolution of this block.
- resolution,
- # Number of input color channels.
- img_channels,
- # Index of the first layer.
- first_layer_idx,
- # Architecture: 'orig', 'skip', 'resnet'.
- architecture='resnet',
- # Activation function: 'relu', 'lrelu', etc.
- activation='lrelu',
- # Low-pass filter to apply when resampling activations.
- resample_filter=[1, 3, 3, 1],
- # Clamp the output of convolution layers to +-X, None = disable clamping.
- conv_clamp=None,
- use_fp16=False, # Use FP16 for this block?
- fp16_channels_last=False, # Use channels-last memory format with FP16?
- # Freeze-D: Number of layers to freeze.
- freeze_layers=0,
- ):
- assert in_channels in [0, tmp_channels]
- assert architecture in ['orig', 'skip', 'resnet']
- super().__init__()
- self.in_channels = in_channels
- self.resolution = resolution
- self.img_channels = img_channels
- self.first_layer_idx = first_layer_idx
- self.architecture = architecture
- self.use_fp16 = use_fp16
- self.channels_last = (use_fp16 and fp16_channels_last)
- self.register_buffer(
- 'resample_filter', upfirdn2d.setup_filter(resample_filter))
-
- self.num_layers = 0
-
- def trainable_gen():
- while True:
- layer_idx = self.first_layer_idx + self.num_layers
- trainable = (layer_idx >= freeze_layers)
- self.num_layers += 1
- yield trainable
- trainable_iter = trainable_gen()
-
- if in_channels == 0 or architecture == 'skip':
- self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation,
- trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
-
- self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
- trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
-
- self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
- trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)
-
- if architecture == 'resnet':
- self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
- trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last)
-
- def forward(self, x, img, force_fp32=False):
- if (x if x is not None else img).device.type != 'cuda':
- force_fp32 = True
- dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
- memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
-
- # Input.
- if x is not None:
- misc.assert_shape(x, [None, self.in_channels,
- self.resolution, self.resolution])
- x = x.to(dtype=dtype, memory_format=memory_format)
-
- # FromRGB.
- if self.in_channels == 0 or self.architecture == 'skip':
- misc.assert_shape(
- img, [None, self.img_channels, self.resolution, self.resolution])
- img = img.to(dtype=dtype, memory_format=memory_format)
- y = self.fromrgb(img)
- x = x + y if x is not None else y
- img = upfirdn2d.downsample2d(
- img, self.resample_filter) if self.architecture == 'skip' else None
-
- # Main layers.
- if self.architecture == 'resnet':
- y = self.skip(x, gain=np.sqrt(0.5))
- x = self.conv0(x)
- x = self.conv1(x, gain=np.sqrt(0.5))
- x = y.add_(x)
- else:
- x = self.conv0(x)
- x = self.conv1(x)
-
- assert x.dtype == dtype
- return x, img
-
- def extra_repr(self):
- return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class MinibatchStdLayer(torch.nn.Module):
- def __init__(self, group_size, num_channels=1):
- super().__init__()
- self.group_size = group_size
- self.num_channels = num_channels
-
- def forward(self, x):
- N, C, H, W = x.shape
- with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants
- G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(
- N)) if self.group_size is not None else N
- F = self.num_channels
- c = C // F
-
- # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c.
- y = x.reshape(G, -1, F, c, H, W)
- # [GnFcHW] Subtract mean over group.
- y = y - y.mean(dim=0)
- # [nFcHW] Calc variance over group.
- y = y.square().mean(dim=0)
- y = (y + 1e-8).sqrt() # [nFcHW] Calc stddev over group.
- # [nF] Take average over channels and pixels.
- y = y.mean(dim=[2, 3, 4])
- y = y.reshape(-1, F, 1, 1) # [nF11] Add missing dimensions.
- # [NFHW] Replicate over group and pixels.
- y = y.repeat(G, 1, H, W)
- # [NCHW] Append to input as new channels.
- x = torch.cat([x, y], dim=1)
- return x
-
- def extra_repr(self):
- return f'group_size={self.group_size}, num_channels={self.num_channels:d}'
-
-# ----------------------------------------------------------------------------
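The group-statistics trick above can be checked in isolation. Below is a minimal sketch in plain PyTorch (toy tensor sizes chosen here, not taken from the training code): it repeats the same reshape/mean/stddev steps and shows that exactly one extra feature map is appended per num_channels group.

import torch

N, C, H, W = 8, 4, 2, 2                          # toy batch (assumed sizes)
G, F = 4, 1                                      # group_size=4, num_channels=1

x = torch.randn(N, C, H, W)
y = x.reshape(G, -1, F, C // F, H, W)            # split batch into groups, channels into F groups
y = y - y.mean(dim=0)                            # subtract per-group mean
y = (y.square().mean(dim=0) + 1e-8).sqrt()       # stddev over each group
y = y.mean(dim=[2, 3, 4]).reshape(-1, F, 1, 1)   # average over channels and pixels
y = y.repeat(G, 1, H, W)                         # replicate back over group and pixels
out = torch.cat([x, y], dim=1)                   # append as new channels
print(out.shape)                                 # torch.Size([8, 5, 2, 2])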
-
-
-@persistence.persistent_class
-class DiscriminatorEpilogue(torch.nn.Module):
- def __init__(self,
- in_channels, # Number of input channels.
- # Dimensionality of mapped conditioning label, 0 = no label.
- cmap_dim,
- resolution, # Resolution of this block.
- # Number of input color channels.
- img_channels,
- # Architecture: 'orig', 'skip', 'resnet'.
- architecture='resnet',
- # Group size for the minibatch standard deviation layer, None = entire minibatch.
- mbstd_group_size=4,
- # Number of features for the minibatch standard deviation layer, 0 = disable.
- mbstd_num_channels=1,
- # Activation function: 'relu', 'lrelu', etc.
- activation='lrelu',
- # Clamp the output of convolution layers to +-X, None = disable clamping.
- conv_clamp=None,
- ):
- assert architecture in ['orig', 'skip', 'resnet']
- super().__init__()
- self.in_channels = in_channels
- self.cmap_dim = cmap_dim
- self.resolution = resolution
- self.img_channels = img_channels
- self.architecture = architecture
-
- if architecture == 'skip':
- self.fromrgb = Conv2dLayer(
- img_channels, in_channels, kernel_size=1, activation=activation)
- self.mbstd = MinibatchStdLayer(
- group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None
- self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels,
- kernel_size=3, activation=activation, conv_clamp=conv_clamp)
- self.fc = FullyConnectedLayer(
- in_channels * (resolution ** 2), in_channels, activation=activation)
- self.out = FullyConnectedLayer(
- in_channels, 1 if cmap_dim == 0 else cmap_dim)
-
- def forward(self, x, img, cmap, force_fp32=False):
- misc.assert_shape(x, [None, self.in_channels,
- self.resolution, self.resolution]) # [NCHW]
- _ = force_fp32 # unused
- dtype = torch.float32
- memory_format = torch.contiguous_format
-
- # FromRGB.
- x = x.to(dtype=dtype, memory_format=memory_format)
- if self.architecture == 'skip':
- misc.assert_shape(
- img, [None, self.img_channels, self.resolution, self.resolution])
- img = img.to(dtype=dtype, memory_format=memory_format)
- x = x + self.fromrgb(img)
-
- # Main layers.
- if self.mbstd is not None:
- x = self.mbstd(x)
- x = self.conv(x)
- x = self.fc(x.flatten(1))
- x = self.out(x)
-
- # Conditioning.
- if self.cmap_dim > 0:
- misc.assert_shape(cmap, [None, self.cmap_dim])
- x = (x * cmap).sum(dim=1, keepdim=True) * \
- (1 / np.sqrt(self.cmap_dim))
-
- assert x.dtype == dtype
- return x
-
- def extra_repr(self):
- return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class Discriminator(torch.nn.Module):
- def __init__(self,
- # Conditioning label (C) dimensionality.
- c_dim,
- img_resolution, # Input resolution.
- # Number of input color channels.
- img_channels,
- # Architecture: 'orig', 'skip', 'resnet'.
- architecture='resnet',
- # Overall multiplier for the number of channels.
- channel_base=32768,
- # Maximum number of channels in any layer.
- channel_max=512,
- # Use FP16 for the N highest resolutions.
- num_fp16_res=4,
- # Clamp the output of convolution layers to +-X, None = disable clamping.
- conv_clamp=256,
- # Dimensionality of mapped conditioning label, None = default.
- cmap_dim=None,
- block_kwargs={}, # Arguments for DiscriminatorBlock.
- mapping_kwargs={}, # Arguments for MappingNetwork.
- # Arguments for DiscriminatorEpilogue.
- epilogue_kwargs={},
- ):
- super().__init__()
- self.c_dim = c_dim
- self.img_resolution = img_resolution
- self.img_resolution_log2 = int(np.log2(img_resolution))
- self.img_channels = img_channels
- self.block_resolutions = [
- 2 ** i for i in range(self.img_resolution_log2, 2, -1)]
- channels_dict = {res: min(channel_base // res, channel_max)
- for res in self.block_resolutions + [4]}
- fp16_resolution = max(
- 2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
-
- if cmap_dim is None:
- cmap_dim = channels_dict[4]
- if c_dim == 0:
- cmap_dim = 0
-
- common_kwargs = dict(img_channels=img_channels,
- architecture=architecture, conv_clamp=conv_clamp)
- cur_layer_idx = 0
- for res in self.block_resolutions:
- in_channels = channels_dict[res] if res < img_resolution else 0
- tmp_channels = channels_dict[res]
- out_channels = channels_dict[res // 2]
- use_fp16 = (res >= fp16_resolution)
- block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
- first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs)
- setattr(self, f'b{res}', block)
- cur_layer_idx += block.num_layers
- if c_dim > 0:
- self.mapping = MappingNetwork(
- z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
- self.b4 = DiscriminatorEpilogue(
- channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)
-
- def forward(self, img, c, update_emas=False, **block_kwargs):
- _ = update_emas # unused
- x = None
- for res in self.block_resolutions:
- block = getattr(self, f'b{res}')
- x, img = block(x, img, **block_kwargs)
-
- cmap = None
- if self.c_dim > 0:
- cmap = self.mapping(None, c)
- x = self.b4(x, img, cmap)
- return x
-
- def extra_repr(self):
- return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}'
-
-# ----------------------------------------------------------------------------
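For orientation, the resolution and channel schedule that Discriminator.__init__ derives can be reproduced standalone. A sketch with the constructor defaults (channel_base=32768, channel_max=512, num_fp16_res=4) and an img_resolution of 256 assumed for illustration:

import numpy as np

img_resolution, channel_base, channel_max, num_fp16_res = 256, 32768, 512, 4  # 256 is assumed

res_log2 = int(np.log2(img_resolution))
block_resolutions = [2 ** i for i in range(res_log2, 2, -1)]
channels_dict = {res: min(channel_base // res, channel_max) for res in block_resolutions + [4]}
fp16_resolution = max(2 ** (res_log2 + 1 - num_fp16_res), 8)

print(block_resolutions)   # [256, 128, 64, 32, 16, 8]
print(channels_dict)       # {256: 128, 128: 256, 64: 512, 32: 512, 16: 512, 8: 512, 4: 512}
print(fp16_resolution)     # 32 -> blocks at resolution >= 32 run in FP16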
diff --git a/spaces/Amrrs/DragGan-Inversion/training/networks_stylegan3.py b/spaces/Amrrs/DragGan-Inversion/training/networks_stylegan3.py
deleted file mode 100644
index 338fd287110a02d76c0b7b03fbf041c340f5adb9..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/training/networks_stylegan3.py
+++ /dev/null
@@ -1,645 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Generator architecture from the paper
-"Alias-Free Generative Adversarial Networks"."""
-
-import numpy as np
-import scipy.signal
-import scipy.optimize
-import torch
-import torch.nn.functional as F
-from torch_utils import misc
-from torch_utils import persistence
-from torch_utils.ops import conv2d_gradfix
-from torch_utils.ops import filtered_lrelu
-from torch_utils.ops import bias_act
-
-# ----------------------------------------------------------------------------
-
-
-@misc.profiled_function
-def modulated_conv2d(
- # Input tensor: [batch_size, in_channels, in_height, in_width]
- x,
- # Weight tensor: [out_channels, in_channels, kernel_height, kernel_width]
- w,
- s, # Style tensor: [batch_size, in_channels]
- demodulate=True, # Apply weight demodulation?
- padding=0, # Padding: int or [padH, padW]
- input_gain=None, # Optional scale factors for the input channels: [], [in_channels], or [batch_size, in_channels]
-):
- with misc.suppress_tracer_warnings(): # this value will be treated as a constant
- batch_size = int(x.shape[0])
- out_channels, in_channels, kh, kw = w.shape
- misc.assert_shape(w, [out_channels, in_channels, kh, kw]) # [OIkk]
- misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
- misc.assert_shape(s, [batch_size, in_channels]) # [NI]
-
- # Pre-normalize inputs.
- if demodulate:
- w = w * w.square().mean([1, 2, 3], keepdim=True).rsqrt()
- s = s * s.square().mean().rsqrt()
-
- # Modulate weights.
- w = w.unsqueeze(0) # [NOIkk]
- w = w * s.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk]
-
- # Demodulate weights.
- if demodulate:
- dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt() # [NO]
- w = w * dcoefs.unsqueeze(2).unsqueeze(3).unsqueeze(4) # [NOIkk]
-
- # Apply input scaling.
- if input_gain is not None:
- input_gain = input_gain.expand(batch_size, in_channels) # [NI]
- w = w * input_gain.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk]
-
- # Execute as one fused op using grouped convolution.
- x = x.reshape(1, -1, *x.shape[2:])
- w = w.reshape(-1, in_channels, kh, kw)
- x = conv2d_gradfix.conv2d(input=x, weight=w.to(
- x.dtype), padding=padding, groups=batch_size)
- x = x.reshape(batch_size, -1, *x.shape[2:])
- return x
-
-# ----------------------------------------------------------------------------
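The final grouped convolution in modulated_conv2d is simply a fused way of giving every sample in the batch its own (modulated) weight tensor. A self-contained equivalence check with plain torch.nn.functional.conv2d, using toy shapes assumed here (the real code routes the same call through conv2d_gradfix):

import torch
import torch.nn.functional as F

N, Ci, Co, k, H, W = 2, 3, 5, 3, 8, 8
x = torch.randn(N, Ci, H, W)
w = torch.randn(N, Co, Ci, k, k)              # a separate weight per sample

# Fused: fold the batch into the channel axis and convolve with groups=N.
fused = F.conv2d(x.reshape(1, N * Ci, H, W), w.reshape(N * Co, Ci, k, k),
                 padding=1, groups=N).reshape(N, Co, H, W)

# Reference: convolve each sample with its own weight.
ref = torch.stack([F.conv2d(x[i:i + 1], w[i], padding=1)[0] for i in range(N)])

print(torch.allclose(fused, ref, atol=1e-5))  # True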
-
-
-@persistence.persistent_class
-class FullyConnectedLayer(torch.nn.Module):
- def __init__(self,
- in_features, # Number of input features.
- out_features, # Number of output features.
- # Activation function: 'relu', 'lrelu', etc.
- activation='linear',
- bias=True, # Apply additive bias before the activation function?
- lr_multiplier=1, # Learning rate multiplier.
- # Initial standard deviation of the weight tensor.
- weight_init=1,
- bias_init=0, # Initial value of the additive bias.
- ):
- super().__init__()
- self.in_features = in_features
- self.out_features = out_features
- self.activation = activation
- self.weight = torch.nn.Parameter(torch.randn(
- [out_features, in_features]) * (weight_init / lr_multiplier))
- bias_init = np.broadcast_to(np.asarray(
- bias_init, dtype=np.float32), [out_features])
- self.bias = torch.nn.Parameter(torch.from_numpy(
- bias_init / lr_multiplier)) if bias else None
- self.weight_gain = lr_multiplier / np.sqrt(in_features)
- self.bias_gain = lr_multiplier
-
- def forward(self, x):
- w = self.weight.to(x.dtype) * self.weight_gain
- b = self.bias
- if b is not None:
- b = b.to(x.dtype)
- if self.bias_gain != 1:
- b = b * self.bias_gain
- if self.activation == 'linear' and b is not None:
- x = torch.addmm(b.unsqueeze(0), x, w.t())
- else:
- x = x.matmul(w.t())
- x = bias_act.bias_act(x, b, act=self.activation)
- return x
-
- def extra_repr(self):
- return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class MappingNetwork(torch.nn.Module):
- def __init__(self,
- z_dim, # Input latent (Z) dimensionality.
- # Conditioning label (C) dimensionality, 0 = no labels.
- c_dim,
- # Intermediate latent (W) dimensionality.
- w_dim,
- # Number of intermediate latents to output.
- num_ws,
- num_layers=2, # Number of mapping layers.
- # Learning rate multiplier for the mapping layers.
- lr_multiplier=0.01,
- # Decay for tracking the moving average of W during training.
- w_avg_beta=0.998,
- ):
- super().__init__()
- self.z_dim = z_dim
- self.c_dim = c_dim
- self.w_dim = w_dim
- self.num_ws = num_ws
- self.num_layers = num_layers
- self.w_avg_beta = w_avg_beta
-
- # Construct layers.
- self.embed = FullyConnectedLayer(
- self.c_dim, self.w_dim) if self.c_dim > 0 else None
- features = [self.z_dim + (self.w_dim if self.c_dim >
- 0 else 0)] + [self.w_dim] * self.num_layers
- for idx, in_features, out_features in zip(range(num_layers), features[:-1], features[1:]):
- layer = FullyConnectedLayer(
- in_features, out_features, activation='lrelu', lr_multiplier=lr_multiplier)
- setattr(self, f'fc{idx}', layer)
- self.register_buffer('w_avg', torch.zeros([w_dim]))
-
- def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
- misc.assert_shape(z, [None, self.z_dim])
- if truncation_cutoff is None:
- truncation_cutoff = self.num_ws
-
- # Embed, normalize, and concatenate inputs.
- x = z.to(torch.float32)
- x = x * (x.square().mean(1, keepdim=True) + 1e-8).rsqrt()
- if self.c_dim > 0:
- misc.assert_shape(c, [None, self.c_dim])
- y = self.embed(c.to(torch.float32))
- y = y * (y.square().mean(1, keepdim=True) + 1e-8).rsqrt()
- x = torch.cat([x, y], dim=1) if x is not None else y
-
- # Execute layers.
- for idx in range(self.num_layers):
- x = getattr(self, f'fc{idx}')(x)
-
- # Update moving average of W.
- if update_emas:
- self.w_avg.copy_(x.detach().mean(
- dim=0).lerp(self.w_avg, self.w_avg_beta))
-
- # Broadcast and apply truncation.
- x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
- if truncation_psi != 1:
- x[:, :truncation_cutoff] = self.w_avg.lerp(
- x[:, :truncation_cutoff], truncation_psi)
- return x
-
- def extra_repr(self):
- return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}'
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class SynthesisInput(torch.nn.Module):
- def __init__(self,
- w_dim, # Intermediate latent (W) dimensionality.
- channels, # Number of output channels.
- size, # Output spatial size: int or [width, height].
- sampling_rate, # Output sampling rate.
- bandwidth, # Output bandwidth.
- ):
- super().__init__()
- self.w_dim = w_dim
- self.channels = channels
- self.size = np.broadcast_to(np.asarray(size), [2])
- self.sampling_rate = sampling_rate
- self.bandwidth = bandwidth
-
- # Draw random frequencies from uniform 2D disc.
- freqs = torch.randn([self.channels, 2])
- radii = freqs.square().sum(dim=1, keepdim=True).sqrt()
- freqs /= radii * radii.square().exp().pow(0.25)
- freqs *= bandwidth
- phases = torch.rand([self.channels]) - 0.5
-
- # Setup parameters and buffers.
- self.weight = torch.nn.Parameter(
- torch.randn([self.channels, self.channels]))
- self.affine = FullyConnectedLayer(
- w_dim, 4, weight_init=0, bias_init=[1, 0, 0, 0])
- # User-specified inverse transform wrt. resulting image.
- self.register_buffer('transform', torch.eye(3, 3))
- self.register_buffer('freqs', freqs)
- self.register_buffer('phases', phases)
-
- def forward(self, w):
- # Introduce batch dimension.
- transforms = self.transform.unsqueeze(0) # [batch, row, col]
- freqs = self.freqs.unsqueeze(0) # [batch, channel, xy]
- phases = self.phases.unsqueeze(0) # [batch, channel]
-
- # Apply learned transformation.
- t = self.affine(w) # t = (r_c, r_s, t_x, t_y)
- # t' = (r'_c, r'_s, t'_x, t'_y)
- t = t / t[:, :2].norm(dim=1, keepdim=True)
- # Inverse rotation wrt. resulting image.
- m_r = torch.eye(3, device=w.device).unsqueeze(
- 0).repeat([w.shape[0], 1, 1])
- m_r[:, 0, 0] = t[:, 0] # r'_c
- m_r[:, 0, 1] = -t[:, 1] # r'_s
- m_r[:, 1, 0] = t[:, 1] # r'_s
- m_r[:, 1, 1] = t[:, 0] # r'_c
- # Inverse translation wrt. resulting image.
- m_t = torch.eye(3, device=w.device).unsqueeze(
- 0).repeat([w.shape[0], 1, 1])
- m_t[:, 0, 2] = -t[:, 2] # t'_x
- m_t[:, 1, 2] = -t[:, 3] # t'_y
- # First rotate resulting image, then translate, and finally apply user-specified transform.
- transforms = m_r @ m_t @ transforms
-
- # Transform frequencies.
- phases = phases + (freqs @ transforms[:, :2, 2:]).squeeze(2)
- freqs = freqs @ transforms[:, :2, :2]
-
- # Dampen out-of-band frequencies that may occur due to the user-specified transform.
- amplitudes = (1 - (freqs.norm(dim=2) - self.bandwidth) /
- (self.sampling_rate / 2 - self.bandwidth)).clamp(0, 1)
-
- # Construct sampling grid.
- theta = torch.eye(2, 3, device=w.device)
- theta[0, 0] = 0.5 * self.size[0] / self.sampling_rate
- theta[1, 1] = 0.5 * self.size[1] / self.sampling_rate
- grids = torch.nn.functional.affine_grid(theta.unsqueeze(
- 0), [1, 1, self.size[1], self.size[0]], align_corners=False)
-
- # Compute Fourier features.
- x = (grids.unsqueeze(3) @ freqs.permute(0, 2, 1).unsqueeze(1).unsqueeze(2)
- ).squeeze(3) # [batch, height, width, channel]
- x = x + phases.unsqueeze(1).unsqueeze(2)
- x = torch.sin(x * (np.pi * 2))
- x = x * amplitudes.unsqueeze(1).unsqueeze(2)
-
- # Apply trainable mapping.
- weight = self.weight / np.sqrt(self.channels)
- x = x @ weight.t()
-
- # Ensure correct shape.
- x = x.permute(0, 3, 1, 2) # [batch, channel, height, width]
- misc.assert_shape(x, [w.shape[0], self.channels,
- int(self.size[1]), int(self.size[0])])
- return x
-
- def extra_repr(self):
- return '\n'.join([
- f'w_dim={self.w_dim:d}, channels={self.channels:d}, size={list(self.size)},',
- f'sampling_rate={self.sampling_rate:g}, bandwidth={self.bandwidth:g}'])
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class SynthesisLayer(torch.nn.Module):
- def __init__(self,
- # Intermediate latent (W) dimensionality.
- w_dim,
- is_torgb, # Is this the final ToRGB layer?
- is_critically_sampled, # Does this layer use critical sampling?
- use_fp16, # Does this layer use FP16?
-
- # Input & output specifications.
- in_channels, # Number of input channels.
- out_channels, # Number of output channels.
- # Input spatial size: int or [width, height].
- in_size,
- # Output spatial size: int or [width, height].
- out_size,
- in_sampling_rate, # Input sampling rate (s).
- out_sampling_rate, # Output sampling rate (s).
- # Input cutoff frequency (f_c).
- in_cutoff,
- # Output cutoff frequency (f_c).
- out_cutoff,
- # Input transition band half-width (f_h).
- in_half_width,
-                 # Output transition band half-width (f_h).
- out_half_width,
-
- # Hyperparameters.
-            # Convolution kernel size. Ignored for the final ToRGB layer.
- conv_kernel=3,
- # Low-pass filter size relative to the lower resolution when up/downsampling.
- filter_size=6,
-            # Relative sampling rate for leaky ReLU. Ignored for the final ToRGB layer.
- lrelu_upsampling=2,
- # Use radially symmetric downsampling filter? Ignored for critically sampled layers.
- use_radial_filters=False,
- # Clamp the output to [-X, +X], None = disable clamping.
- conv_clamp=256,
- # Decay rate for the moving average of input magnitudes.
- magnitude_ema_beta=0.999,
- ):
- super().__init__()
- self.w_dim = w_dim
- self.is_torgb = is_torgb
- self.is_critically_sampled = is_critically_sampled
- self.use_fp16 = use_fp16
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.in_size = np.broadcast_to(np.asarray(in_size), [2])
- self.out_size = np.broadcast_to(np.asarray(out_size), [2])
- self.in_sampling_rate = in_sampling_rate
- self.out_sampling_rate = out_sampling_rate
- self.tmp_sampling_rate = max(
- in_sampling_rate, out_sampling_rate) * (1 if is_torgb else lrelu_upsampling)
- self.in_cutoff = in_cutoff
- self.out_cutoff = out_cutoff
- self.in_half_width = in_half_width
- self.out_half_width = out_half_width
- self.conv_kernel = 1 if is_torgb else conv_kernel
- self.conv_clamp = conv_clamp
- self.magnitude_ema_beta = magnitude_ema_beta
-
- # Setup parameters and buffers.
- self.affine = FullyConnectedLayer(
- self.w_dim, self.in_channels, bias_init=1)
- self.weight = torch.nn.Parameter(torch.randn(
- [self.out_channels, self.in_channels, self.conv_kernel, self.conv_kernel]))
- self.bias = torch.nn.Parameter(torch.zeros([self.out_channels]))
- self.register_buffer('magnitude_ema', torch.ones([]))
-
- # Design upsampling filter.
- self.up_factor = int(
- np.rint(self.tmp_sampling_rate / self.in_sampling_rate))
- assert self.in_sampling_rate * self.up_factor == self.tmp_sampling_rate
- self.up_taps = filter_size * \
- self.up_factor if self.up_factor > 1 and not self.is_torgb else 1
- self.register_buffer('up_filter', self.design_lowpass_filter(
- numtaps=self.up_taps, cutoff=self.in_cutoff, width=self.in_half_width*2, fs=self.tmp_sampling_rate))
-
- # Design downsampling filter.
- self.down_factor = int(
- np.rint(self.tmp_sampling_rate / self.out_sampling_rate))
- assert self.out_sampling_rate * self.down_factor == self.tmp_sampling_rate
- self.down_taps = filter_size * \
- self.down_factor if self.down_factor > 1 and not self.is_torgb else 1
- self.down_radial = use_radial_filters and not self.is_critically_sampled
- self.register_buffer('down_filter', self.design_lowpass_filter(
- numtaps=self.down_taps, cutoff=self.out_cutoff, width=self.out_half_width*2, fs=self.tmp_sampling_rate, radial=self.down_radial))
-
- # Compute padding.
- # Desired output size before downsampling.
- pad_total = (self.out_size - 1) * self.down_factor + 1
- # Input size after upsampling.
- pad_total -= (self.in_size + self.conv_kernel - 1) * self.up_factor
- # Size reduction caused by the filters.
- pad_total += self.up_taps + self.down_taps - 2
- # Shift sample locations according to the symmetric interpretation (Appendix C.3).
- pad_lo = (pad_total + self.up_factor) // 2
- pad_hi = pad_total - pad_lo
- self.padding = [int(pad_lo[0]), int(pad_hi[0]),
- int(pad_lo[1]), int(pad_hi[1])]
-
- def forward(self, x, w, noise_mode='random', force_fp32=False, update_emas=False):
- assert noise_mode in ['random', 'const', 'none'] # unused
- misc.assert_shape(x, [None, self.in_channels, int(
- self.in_size[1]), int(self.in_size[0])])
- misc.assert_shape(w, [x.shape[0], self.w_dim])
-
- # Track input magnitude.
- if update_emas:
- with torch.autograd.profiler.record_function('update_magnitude_ema'):
- magnitude_cur = x.detach().to(torch.float32).square().mean()
- self.magnitude_ema.copy_(magnitude_cur.lerp(
- self.magnitude_ema, self.magnitude_ema_beta))
- input_gain = self.magnitude_ema.rsqrt()
-
- # Execute affine layer.
- styles = self.affine(w)
- if self.is_torgb:
- weight_gain = 1 / \
- np.sqrt(self.in_channels * (self.conv_kernel ** 2))
- styles = styles * weight_gain
-
- # Execute modulated conv2d.
- dtype = torch.float16 if (
- self.use_fp16 and not force_fp32 and x.device.type == 'cuda') else torch.float32
- x = modulated_conv2d(x=x.to(dtype), w=self.weight, s=styles,
- padding=self.conv_kernel-1, demodulate=(not self.is_torgb), input_gain=input_gain)
-
- # Execute bias, filtered leaky ReLU, and clamping.
- gain = 1 if self.is_torgb else np.sqrt(2)
- slope = 1 if self.is_torgb else 0.2
- x = filtered_lrelu.filtered_lrelu(x=x, fu=self.up_filter, fd=self.down_filter, b=self.bias.to(x.dtype),
- up=self.up_factor, down=self.down_factor, padding=self.padding, gain=gain, slope=slope, clamp=self.conv_clamp)
-
- # Ensure correct shape and dtype.
- misc.assert_shape(x, [None, self.out_channels, int(
- self.out_size[1]), int(self.out_size[0])])
- assert x.dtype == dtype
- return x
-
- @staticmethod
- def design_lowpass_filter(numtaps, cutoff, width, fs, radial=False):
- assert numtaps >= 1
-
- # Identity filter.
- if numtaps == 1:
- return None
-
- # Separable Kaiser low-pass filter.
- if not radial:
- f = scipy.signal.firwin(
- numtaps=numtaps, cutoff=cutoff, width=width, fs=fs)
- return torch.as_tensor(f, dtype=torch.float32)
-
- # Radially symmetric jinc-based filter.
- x = (np.arange(numtaps) - (numtaps - 1) / 2) / fs
- r = np.hypot(*np.meshgrid(x, x))
- f = scipy.special.j1(2 * cutoff * (np.pi * r)) / (np.pi * r)
- beta = scipy.signal.kaiser_beta(
- scipy.signal.kaiser_atten(numtaps, width / (fs / 2)))
- w = np.kaiser(numtaps, beta)
- f *= np.outer(w, w)
- f /= np.sum(f)
- return torch.as_tensor(f, dtype=torch.float32)
-
- def extra_repr(self):
- return '\n'.join([
- f'w_dim={self.w_dim:d}, is_torgb={self.is_torgb},',
- f'is_critically_sampled={self.is_critically_sampled}, use_fp16={self.use_fp16},',
- f'in_sampling_rate={self.in_sampling_rate:g}, out_sampling_rate={self.out_sampling_rate:g},',
- f'in_cutoff={self.in_cutoff:g}, out_cutoff={self.out_cutoff:g},',
- f'in_half_width={self.in_half_width:g}, out_half_width={self.out_half_width:g},',
- f'in_size={list(self.in_size)}, out_size={list(self.out_size)},',
- f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}'])
-
-# ----------------------------------------------------------------------------
-
-
-@persistence.persistent_class
-class SynthesisNetwork(torch.nn.Module):
- def __init__(self,
- # Intermediate latent (W) dimensionality.
- w_dim,
- img_resolution, # Output image resolution.
- img_channels, # Number of color channels.
- # Overall multiplier for the number of channels.
- channel_base=32768,
- # Maximum number of channels in any layer.
- channel_max=512,
- # Total number of layers, excluding Fourier features and ToRGB.
- num_layers=14,
- # Number of critically sampled layers at the end.
- num_critical=2,
- # Cutoff frequency of the first layer (f_{c,0}).
- first_cutoff=2,
- # Minimum stopband of the first layer (f_{t,0}).
- first_stopband=2**2.1,
- # Minimum stopband of the last layer, expressed relative to the cutoff.
- last_stopband_rel=2**0.3,
- # Number of additional pixels outside the image.
- margin_size=10,
- output_scale=0.25, # Scale factor for the output image.
- # Use FP16 for the N highest resolutions.
- num_fp16_res=4,
- # Arguments for SynthesisLayer.
- **layer_kwargs,
- ):
- super().__init__()
- self.w_dim = w_dim
- self.num_ws = num_layers + 2
- self.img_resolution = img_resolution
- self.img_channels = img_channels
- self.num_layers = num_layers
- self.num_critical = num_critical
- self.margin_size = margin_size
- self.output_scale = output_scale
- self.num_fp16_res = num_fp16_res
-
- # Geometric progression of layer cutoffs and min. stopbands.
- last_cutoff = self.img_resolution / 2 # f_{c,N}
- last_stopband = last_cutoff * last_stopband_rel # f_{t,N}
- exponents = np.minimum(
- np.arange(self.num_layers + 1) / (self.num_layers - self.num_critical), 1)
- cutoffs = first_cutoff * \
- (last_cutoff / first_cutoff) ** exponents # f_c[i]
- stopbands = first_stopband * \
- (last_stopband / first_stopband) ** exponents # f_t[i]
-
- # Compute remaining layer parameters.
- sampling_rates = np.exp2(
- np.ceil(np.log2(np.minimum(stopbands * 2, self.img_resolution)))) # s[i]
- half_widths = np.maximum(
- stopbands, sampling_rates / 2) - cutoffs # f_h[i]
- sizes = sampling_rates + self.margin_size * 2
- sizes[-2:] = self.img_resolution
- channels = np.rint(np.minimum(
- (channel_base / 2) / cutoffs, channel_max))
- channels[-1] = self.img_channels
-
- # Construct layers.
- self.input = SynthesisInput(
- w_dim=self.w_dim, channels=int(channels[0]), size=int(sizes[0]),
- sampling_rate=sampling_rates[0], bandwidth=cutoffs[0])
- self.layer_names = []
- for idx in range(self.num_layers + 1):
- prev = max(idx - 1, 0)
- is_torgb = (idx == self.num_layers)
- is_critically_sampled = (
- idx >= self.num_layers - self.num_critical)
- use_fp16 = (sampling_rates[idx] * (2 **
- self.num_fp16_res) > self.img_resolution)
- layer = SynthesisLayer(
- w_dim=self.w_dim, is_torgb=is_torgb, is_critically_sampled=is_critically_sampled, use_fp16=use_fp16,
- in_channels=int(channels[prev]), out_channels=int(channels[idx]),
- in_size=int(sizes[prev]), out_size=int(sizes[idx]),
- in_sampling_rate=int(sampling_rates[prev]), out_sampling_rate=int(sampling_rates[idx]),
- in_cutoff=cutoffs[prev], out_cutoff=cutoffs[idx],
- in_half_width=half_widths[prev], out_half_width=half_widths[idx],
- **layer_kwargs)
- name = f'L{idx}_{layer.out_size[0]}_{layer.out_channels}'
- setattr(self, name, layer)
- self.layer_names.append(name)
-
- def forward(self, ws, return_feature=False, **layer_kwargs):
- features = []
- misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
- ws = ws.to(torch.float32).unbind(dim=1)
-
- # Execute layers.
- x = self.input(ws[0])
- for name, w in zip(self.layer_names, ws[1:]):
- x = getattr(self, name)(x, w, **layer_kwargs)
- features.append(x)
- if self.output_scale != 1:
- x = x * self.output_scale
-
- # Ensure correct shape and dtype.
- misc.assert_shape(x, [None, self.img_channels,
- self.img_resolution, self.img_resolution])
- x = x.to(torch.float32)
- if return_feature:
- return x, features
- else:
- return x
-
- def extra_repr(self):
- return '\n'.join([
- f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
- f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
- f'num_layers={self.num_layers:d}, num_critical={self.num_critical:d},',
- f'margin_size={self.margin_size:d}, num_fp16_res={self.num_fp16_res:d}'])
-
-# ----------------------------------------------------------------------------
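The geometric cutoff/stopband progression above fixes every layer's internal sampling rate and channel count before any weights exist. The sketch below recomputes that schedule with plain NumPy, using the constructor defaults and an img_resolution of 256 assumed for illustration (the real constructor additionally pins the last two sizes to the image resolution and the last channel count to img_channels):

import numpy as np

img_resolution = 256                          # assumed
num_layers, num_critical = 14, 2
first_cutoff, first_stopband, last_stopband_rel = 2, 2 ** 2.1, 2 ** 0.3
channel_base, channel_max = 32768, 512

last_cutoff = img_resolution / 2
last_stopband = last_cutoff * last_stopband_rel
exponents = np.minimum(np.arange(num_layers + 1) / (num_layers - num_critical), 1)
cutoffs = first_cutoff * (last_cutoff / first_cutoff) ** exponents
stopbands = first_stopband * (last_stopband / first_stopband) ** exponents
sampling_rates = np.exp2(np.ceil(np.log2(np.minimum(stopbands * 2, img_resolution))))
channels = np.rint(np.minimum((channel_base / 2) / cutoffs, channel_max))

print(sampling_rates.astype(int))  # [16 16 16 32 32 64 64 128 128 128 256 256 256 256 256]
print(channels.astype(int))        # 512 for the early layers, tapering off as the cutoff grows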
-
-
-@persistence.persistent_class
-class Generator(torch.nn.Module):
- def __init__(self,
- z_dim, # Input latent (Z) dimensionality.
- # Conditioning label (C) dimensionality.
- c_dim,
- # Intermediate latent (W) dimensionality.
- w_dim,
- img_resolution, # Output resolution.
- img_channels, # Number of output color channels.
- mapping_kwargs={}, # Arguments for MappingNetwork.
- resize=None,
- **synthesis_kwargs, # Arguments for SynthesisNetwork.
- ):
- super().__init__()
- self.z_dim = z_dim
- self.c_dim = c_dim
- self.w_dim = w_dim
- self.img_resolution = img_resolution
- self.img_channels = img_channels
- self.synthesis = SynthesisNetwork(
- w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
- self.num_ws = self.synthesis.num_ws
- self.mapping = MappingNetwork(
- z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)
- self.resize = resize
-
- def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, input_is_w=False, return_feature=False, **synthesis_kwargs):
- if input_is_w:
- ws = z
- if ws.dim() == 2:
- ws = ws.unsqueeze(1).repeat([1, self.mapping.num_ws, 1])
- else:
- ws = self.mapping(z, c, truncation_psi=truncation_psi,
- truncation_cutoff=truncation_cutoff, update_emas=update_emas)
- img = self.synthesis(ws, update_emas=update_emas,
- return_feature=return_feature, **synthesis_kwargs)
- if return_feature:
- img, feature = img
- if self.resize is not None:
- img = imresize(img, [self.resize, self.resize])
- if return_feature:
- return img, feature
- else:
- return img
-
-# ----------------------------------------------------------------------------
-
-
-def imresize(image, size):
- dim = image.dim()
- if dim == 3:
- image = image.unsqueeze(1)
- b, _, h, w = image.shape
- if size[0] > h:
- image = F.interpolate(image, size, mode='bilinear')
- elif size[0] < h:
- image = F.interpolate(image, size, mode='area')
- if dim == 3:
- image = image.squeeze(1)
- return image
diff --git a/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/model/abstract.py b/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/model/abstract.py
deleted file mode 100644
index 00c84cc3dbd4a13d67d24aab15775c221a66059c..0000000000000000000000000000000000000000
--- a/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/model/abstract.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import abc
-import numpy as np
-import pytorch_lightning as pl
-from pytorch_lightning.utilities.types import (
- EVAL_DATALOADERS,
- TRAIN_DATALOADERS,
-)
-from torch import nn
-from torch.utils.data import DataLoader
-import torch
-from torchvision import transforms
-
-from src.dataset import DATASET_REGISTRY
-
-
-class AbstractModel(pl.LightningModule):
- def __init__(self, cfg):
- super().__init__()
- self.cfg = cfg
- self.train_dataset = None
- self.val_dataset = None
- self.metric_evaluator = None
- self.init_model()
-
- def setup(self, stage):
- if stage in ["fit", "validate", "test"]:
- self.train_dataset = DATASET_REGISTRY.get("BlenderDataset")(
- **self.cfg["dataset"]["train"]["params"],
- )
-
- self.val_dataset = DATASET_REGISTRY.get("BlenderDataset")(
- **self.cfg["dataset"]["val"]["params"],
- )
- # self.metric_evaluator = SHRECMetricEvaluator(
- # embed_dim=self.cfg["model"]["embed_dim"]
- # )
- @abc.abstractmethod
- def init_model(self):
- """
- Function to initialize model
- """
- raise NotImplementedError
-
- @abc.abstractmethod
- def forward(self, batch):
- raise NotImplementedError
-
- @abc.abstractmethod
- def compute_loss(self, forwarded_batch, input_batch):
- """
- Function to compute loss
- Args:
- forwarded_batch: output of `forward` method
-            input_batch: the original input batch
-
- Returns:
- loss: computed loss
- """
- raise NotImplementedError
-
- def training_step(self, batch, batch_idx):
- # 1. get embeddings from model
- forwarded_batch = self.forward(batch)
- # 2. Calculate loss
- loss = self.compute_loss(forwarded_batch=forwarded_batch, input_batch=batch)
- # 3. Update monitor
- self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True)
- return {"loss": loss}
-
- def validation_step(self, batch, batch_idx):
- # 1. Get embeddings from model
- forwarded_batch = self.forward(batch)
- # 2. Calculate loss
- loss = self.compute_loss(forwarded_batch=forwarded_batch, input_batch=batch)
- # 3. Update metric for each batch
- self.log("val_loss", loss, on_step=True, on_epoch=True, prog_bar=True)
- self.metric_evaluator.append(
- g_emb=forwarded_batch["pc_embedding_feats"].float().clone().detach(),
- q_emb=forwarded_batch["query_embedding_feats"].float().clone().detach(),
- query_ids=batch["query_ids"],
- gallery_ids=batch["point_cloud_ids"],
- target_ids=batch["point_cloud_ids"],
- )
-
- return {"loss": loss}
-
- def validation_epoch_end(self, outputs) -> None:
- """
- Callback at validation epoch end to do additional works
- with output of validation step, note that this is called
- before `training_epoch_end()`
- Args:
- outputs: output of validation step
- """
- self.log_dict(
- self.metric_evaluator.evaluate(),
- prog_bar=True,
- on_step=False,
- on_epoch=True,
- )
- self.metric_evaluator.reset()
-
- def train_dataloader(self) -> TRAIN_DATALOADERS:
- train_loader = DataLoader(
- dataset=self.train_dataset,
- collate_fn=self.train_dataset.collate_fn,
- **self.cfg["data_loader"]["train"]["params"],
- )
- return train_loader
-
- def val_dataloader(self) -> EVAL_DATALOADERS:
- val_loader = DataLoader(
- dataset=self.val_dataset,
- collate_fn=self.val_dataset.collate_fn,
- **self.cfg["data_loader"]["val"]["params"],
- )
- return val_loader
-
- def configure_optimizers(self):
- pass
\ No newline at end of file
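The three abstract hooks (init_model, forward, compute_loss) define the whole contract a concrete model has to satisfy; the Lightning training/validation loops above never need to be overridden. A hypothetical sketch of a subclass, for illustration only: it assumes this Space's src/ package is importable, and the batch keys "rays" and "rgb" are made up for the example.

import torch
from torch import nn

from src.model.abstract import AbstractModel    # assumes the Space's src/ package is on PYTHONPATH


class ToyRadianceModel(AbstractModel):          # hypothetical example subclass
    def init_model(self):
        self.net = nn.Sequential(nn.Linear(3, 64), nn.ReLU(), nn.Linear(64, 3))

    def forward(self, batch):
        return {"rgb_pred": self.net(batch["rays"])}

    def compute_loss(self, forwarded_batch, input_batch):
        return nn.functional.mse_loss(forwarded_batch["rgb_pred"], input_batch["rgb"])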
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-pytorch-cuda/Dockerfile b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-pytorch-cuda/Dockerfile
deleted file mode 100644
index fab3b70827653a959434cb24929f86e3bd8890e2..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-pytorch-cuda/Dockerfile
+++ /dev/null
@@ -1,47 +0,0 @@
-FROM nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu20.04
-LABEL maintainer="Hugging Face"
-LABEL repository="diffusers"
-
-ENV DEBIAN_FRONTEND=noninteractive
-
-RUN apt update && \
- apt install -y bash \
- build-essential \
- git \
- git-lfs \
- curl \
- ca-certificates \
- libsndfile1-dev \
- libgl1 \
- python3.8 \
- python3-pip \
- python3.8-venv && \
- rm -rf /var/lib/apt/lists
-
-# make sure to use venv
-RUN python3 -m venv /opt/venv
-ENV PATH="/opt/venv/bin:$PATH"
-
-# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
-RUN python3 -m pip install --no-cache-dir --upgrade pip && \
- python3 -m pip install --no-cache-dir \
- torch \
- torchvision \
- torchaudio \
- invisible_watermark && \
- python3 -m pip install --no-cache-dir \
- accelerate \
- datasets \
- hf-doc-builder \
- huggingface-hub \
- Jinja2 \
- librosa \
- numpy \
- scipy \
- tensorboard \
- transformers \
- omegaconf \
- pytorch-lightning \
- xformers
-
-CMD ["/bin/bash"]
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py
deleted file mode 100644
index 07604d7c082f7e7b3c89487461af81fb9650efc7..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py
+++ /dev/null
@@ -1,1645 +0,0 @@
-# coding=utf-8
-# Copyright 2023 The HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" Conversion script for the Stable Diffusion checkpoints."""
-
-import re
-from contextlib import nullcontext
-from io import BytesIO
-from typing import Optional
-
-import requests
-import torch
-from transformers import (
- AutoFeatureExtractor,
- BertTokenizerFast,
- CLIPImageProcessor,
- CLIPTextConfig,
- CLIPTextModel,
- CLIPTextModelWithProjection,
- CLIPTokenizer,
- CLIPVisionConfig,
- CLIPVisionModelWithProjection,
-)
-
-from ...models import (
- AutoencoderKL,
- ControlNetModel,
- PriorTransformer,
- UNet2DConditionModel,
-)
-from ...schedulers import (
- DDIMScheduler,
- DDPMScheduler,
- DPMSolverMultistepScheduler,
- EulerAncestralDiscreteScheduler,
- EulerDiscreteScheduler,
- HeunDiscreteScheduler,
- LMSDiscreteScheduler,
- PNDMScheduler,
- UnCLIPScheduler,
-)
-from ...utils import is_accelerate_available, is_omegaconf_available, is_safetensors_available, logging
-from ...utils.import_utils import BACKENDS_MAPPING
-from ..latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel
-from ..paint_by_example import PaintByExampleImageEncoder
-from ..pipeline_utils import DiffusionPipeline
-from .safety_checker import StableDiffusionSafetyChecker
-from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
-
-
-if is_accelerate_available():
- from accelerate import init_empty_weights
- from accelerate.utils import set_module_tensor_to_device
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
-def shave_segments(path, n_shave_prefix_segments=1):
- """
- Removes segments. Positive values shave the first segments, negative shave the last segments.
- """
- if n_shave_prefix_segments >= 0:
- return ".".join(path.split(".")[n_shave_prefix_segments:])
- else:
- return ".".join(path.split(".")[:n_shave_prefix_segments])
-
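A concrete illustration of the two directions, using a typical LDM UNet parameter name; the expressions below are exactly what shave_segments evaluates for a positive and a negative n_shave_prefix_segments.

path = "model.diffusion_model.input_blocks.0.0.weight"

print(".".join(path.split(".")[2:]))    # shave_segments(path, 2)  -> "input_blocks.0.0.weight"
print(".".join(path.split(".")[:-2]))   # shave_segments(path, -2) -> "model.diffusion_model.input_blocks.0"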
-
-def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
- """
- Updates paths inside resnets to the new naming scheme (local renaming)
- """
- mapping = []
- for old_item in old_list:
- new_item = old_item.replace("in_layers.0", "norm1")
- new_item = new_item.replace("in_layers.2", "conv1")
-
- new_item = new_item.replace("out_layers.0", "norm2")
- new_item = new_item.replace("out_layers.3", "conv2")
-
- new_item = new_item.replace("emb_layers.1", "time_emb_proj")
- new_item = new_item.replace("skip_connection", "conv_shortcut")
-
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
-
- mapping.append({"old": old_item, "new": new_item})
-
- return mapping
-
-
-def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0):
- """
- Updates paths inside resnets to the new naming scheme (local renaming)
- """
- mapping = []
- for old_item in old_list:
- new_item = old_item
-
- new_item = new_item.replace("nin_shortcut", "conv_shortcut")
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
-
- mapping.append({"old": old_item, "new": new_item})
-
- return mapping
-
-
-def renew_attention_paths(old_list, n_shave_prefix_segments=0):
- """
- Updates paths inside attentions to the new naming scheme (local renaming)
- """
- mapping = []
- for old_item in old_list:
- new_item = old_item
-
- # new_item = new_item.replace('norm.weight', 'group_norm.weight')
- # new_item = new_item.replace('norm.bias', 'group_norm.bias')
-
- # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
- # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
-
- # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
-
- mapping.append({"old": old_item, "new": new_item})
-
- return mapping
-
-
-def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0):
- """
- Updates paths inside attentions to the new naming scheme (local renaming)
- """
- mapping = []
- for old_item in old_list:
- new_item = old_item
-
- new_item = new_item.replace("norm.weight", "group_norm.weight")
- new_item = new_item.replace("norm.bias", "group_norm.bias")
-
- new_item = new_item.replace("q.weight", "to_q.weight")
- new_item = new_item.replace("q.bias", "to_q.bias")
-
- new_item = new_item.replace("k.weight", "to_k.weight")
- new_item = new_item.replace("k.bias", "to_k.bias")
-
- new_item = new_item.replace("v.weight", "to_v.weight")
- new_item = new_item.replace("v.bias", "to_v.bias")
-
- new_item = new_item.replace("proj_out.weight", "to_out.0.weight")
- new_item = new_item.replace("proj_out.bias", "to_out.0.bias")
-
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
-
- mapping.append({"old": old_item, "new": new_item})
-
- return mapping
-
-
-def assign_to_checkpoint(
- paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
-):
- """
- This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits
- attention layers, and takes into account additional replacements that may arise.
-
- Assigns the weights to the new checkpoint.
- """
- assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
-
- # Splits the attention layers into three variables.
- if attention_paths_to_split is not None:
- for path, path_map in attention_paths_to_split.items():
- old_tensor = old_checkpoint[path]
- channels = old_tensor.shape[0] // 3
-
- target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
-
- num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
-
- old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
- query, key, value = old_tensor.split(channels // num_heads, dim=1)
-
- checkpoint[path_map["query"]] = query.reshape(target_shape)
- checkpoint[path_map["key"]] = key.reshape(target_shape)
- checkpoint[path_map["value"]] = value.reshape(target_shape)
-
- for path in paths:
- new_path = path["new"]
-
- # These have already been assigned
- if attention_paths_to_split is not None and new_path in attention_paths_to_split:
- continue
-
- # Global renaming happens here
- new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
- new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
- new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
-
- if additional_replacements is not None:
- for replacement in additional_replacements:
- new_path = new_path.replace(replacement["old"], replacement["new"])
-
- # proj_attn.weight has to be converted from conv 1D to linear
- is_attn_weight = "proj_attn.weight" in new_path or ("attentions" in new_path and "to_" in new_path)
- shape = old_checkpoint[path["old"]].shape
- if is_attn_weight and len(shape) == 3:
- checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
- elif is_attn_weight and len(shape) == 4:
- checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0, 0]
- else:
- checkpoint[new_path] = old_checkpoint[path["old"]]
-
-
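Concretely, the local renames produced by renew_resnet_paths compose with the per-block "meta path" replacement that assign_to_checkpoint applies later in convert_ldm_unet_checkpoint. A self-contained trace of one typical key (string manipulation only; no checkpoint is needed):

old_key = "input_blocks.1.0.in_layers.0.weight"                          # LDM name
local = old_key.replace("in_layers.0", "norm1")                          # renew_resnet_paths
new_key = local.replace("input_blocks.1.0", "down_blocks.0.resnets.0")   # meta_path replacement
print(new_key)   # down_blocks.0.resnets.0.norm1.weight  (diffusers name)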
-def conv_attn_to_linear(checkpoint):
- keys = list(checkpoint.keys())
- attn_keys = ["query.weight", "key.weight", "value.weight"]
- for key in keys:
- if ".".join(key.split(".")[-2:]) in attn_keys:
- if checkpoint[key].ndim > 2:
- checkpoint[key] = checkpoint[key][:, :, 0, 0]
- elif "proj_attn.weight" in key:
- if checkpoint[key].ndim > 2:
- checkpoint[key] = checkpoint[key][:, :, 0]
-
-
-def create_unet_diffusers_config(original_config, image_size: int, controlnet=False):
- """
- Creates a config for the diffusers based on the config of the LDM model.
- """
- if controlnet:
- unet_params = original_config.model.params.control_stage_config.params
- else:
- if "unet_config" in original_config.model.params and original_config.model.params.unet_config is not None:
- unet_params = original_config.model.params.unet_config.params
- else:
- unet_params = original_config.model.params.network_config.params
-
- vae_params = original_config.model.params.first_stage_config.params.ddconfig
-
- block_out_channels = [unet_params.model_channels * mult for mult in unet_params.channel_mult]
-
- down_block_types = []
- resolution = 1
- for i in range(len(block_out_channels)):
- block_type = "CrossAttnDownBlock2D" if resolution in unet_params.attention_resolutions else "DownBlock2D"
- down_block_types.append(block_type)
- if i != len(block_out_channels) - 1:
- resolution *= 2
-
- up_block_types = []
- for i in range(len(block_out_channels)):
- block_type = "CrossAttnUpBlock2D" if resolution in unet_params.attention_resolutions else "UpBlock2D"
- up_block_types.append(block_type)
- resolution //= 2
-
- if unet_params.transformer_depth is not None:
- transformer_layers_per_block = (
- unet_params.transformer_depth
- if isinstance(unet_params.transformer_depth, int)
- else list(unet_params.transformer_depth)
- )
- else:
- transformer_layers_per_block = 1
-
- vae_scale_factor = 2 ** (len(vae_params.ch_mult) - 1)
-
- head_dim = unet_params.num_heads if "num_heads" in unet_params else None
- use_linear_projection = (
- unet_params.use_linear_in_transformer if "use_linear_in_transformer" in unet_params else False
- )
- if use_linear_projection:
- # stable diffusion 2-base-512 and 2-768
- if head_dim is None:
- head_dim_mult = unet_params.model_channels // unet_params.num_head_channels
- head_dim = [head_dim_mult * c for c in list(unet_params.channel_mult)]
-
- class_embed_type = None
- addition_embed_type = None
- addition_time_embed_dim = None
- projection_class_embeddings_input_dim = None
- context_dim = None
-
- if unet_params.context_dim is not None:
- context_dim = (
- unet_params.context_dim if isinstance(unet_params.context_dim, int) else unet_params.context_dim[0]
- )
-
- if "num_classes" in unet_params:
- if unet_params.num_classes == "sequential":
- if context_dim in [2048, 1280]:
- # SDXL
- addition_embed_type = "text_time"
- addition_time_embed_dim = 256
- else:
- class_embed_type = "projection"
- assert "adm_in_channels" in unet_params
- projection_class_embeddings_input_dim = unet_params.adm_in_channels
- else:
- raise NotImplementedError(f"Unknown conditional unet num_classes config: {unet_params.num_classes}")
-
- config = {
- "sample_size": image_size // vae_scale_factor,
- "in_channels": unet_params.in_channels,
- "down_block_types": tuple(down_block_types),
- "block_out_channels": tuple(block_out_channels),
- "layers_per_block": unet_params.num_res_blocks,
- "cross_attention_dim": context_dim,
- "attention_head_dim": head_dim,
- "use_linear_projection": use_linear_projection,
- "class_embed_type": class_embed_type,
- "addition_embed_type": addition_embed_type,
- "addition_time_embed_dim": addition_time_embed_dim,
- "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim,
- "transformer_layers_per_block": transformer_layers_per_block,
- }
-
- if controlnet:
- config["conditioning_channels"] = unet_params.hint_channels
- else:
- config["out_channels"] = unet_params.out_channels
- config["up_block_types"] = tuple(up_block_types)
-
- return config
-
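As a sanity check, plugging in the commonly published Stable Diffusion 1.x UNet hyperparameters (model_channels=320, channel_mult=[1, 2, 4, 4], attention_resolutions=[4, 2, 1]; assumed here for illustration) into the loops above reproduces the familiar diffusers block layout:

model_channels, channel_mult = 320, [1, 2, 4, 4]
attention_resolutions = [4, 2, 1]

block_out_channels = [model_channels * m for m in channel_mult]   # [320, 640, 1280, 1280]

down_block_types, resolution = [], 1
for i in range(len(block_out_channels)):
    down_block_types.append("CrossAttnDownBlock2D" if resolution in attention_resolutions
                            else "DownBlock2D")
    if i != len(block_out_channels) - 1:
        resolution *= 2

up_block_types = []
for i in range(len(block_out_channels)):
    up_block_types.append("CrossAttnUpBlock2D" if resolution in attention_resolutions
                          else "UpBlock2D")
    resolution //= 2

print(down_block_types)  # ['CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D', 'DownBlock2D']
print(up_block_types)    # ['UpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'CrossAttnUpBlock2D']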
-
-def create_vae_diffusers_config(original_config, image_size: int):
- """
- Creates a config for the diffusers based on the config of the LDM model.
- """
- vae_params = original_config.model.params.first_stage_config.params.ddconfig
- _ = original_config.model.params.first_stage_config.params.embed_dim
-
- block_out_channels = [vae_params.ch * mult for mult in vae_params.ch_mult]
- down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels)
- up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels)
-
- config = {
- "sample_size": image_size,
- "in_channels": vae_params.in_channels,
- "out_channels": vae_params.out_ch,
- "down_block_types": tuple(down_block_types),
- "up_block_types": tuple(up_block_types),
- "block_out_channels": tuple(block_out_channels),
- "latent_channels": vae_params.z_channels,
- "layers_per_block": vae_params.num_res_blocks,
- }
- return config
-
-
-def create_diffusers_schedular(original_config):
- schedular = DDIMScheduler(
- num_train_timesteps=original_config.model.params.timesteps,
- beta_start=original_config.model.params.linear_start,
- beta_end=original_config.model.params.linear_end,
- beta_schedule="scaled_linear",
- )
- return schedular
-
-
-def create_ldm_bert_config(original_config):
-    bert_params = original_config.model.params.cond_stage_config.params
- config = LDMBertConfig(
- d_model=bert_params.n_embed,
- encoder_layers=bert_params.n_layer,
- encoder_ffn_dim=bert_params.n_embed * 4,
- )
- return config
-
-
-def convert_ldm_unet_checkpoint(
- checkpoint, config, path=None, extract_ema=False, controlnet=False, skip_extract_state_dict=False
-):
- """
- Takes a state dict and a config, and returns a converted checkpoint.
- """
-
- if skip_extract_state_dict:
- unet_state_dict = checkpoint
- else:
- # extract state_dict for UNet
- unet_state_dict = {}
- keys = list(checkpoint.keys())
-
- if controlnet:
- unet_key = "control_model."
- else:
- unet_key = "model.diffusion_model."
-
-        # at least 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA
- if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema:
- logger.warning(f"Checkpoint {path} has both EMA and non-EMA weights.")
- logger.warning(
- "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA"
- " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag."
- )
- for key in keys:
- if key.startswith("model.diffusion_model"):
- flat_ema_key = "model_ema." + "".join(key.split(".")[1:])
- unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key)
- else:
- if sum(k.startswith("model_ema") for k in keys) > 100:
- logger.warning(
- "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA"
- " weights (usually better for inference), please make sure to add the `--extract_ema` flag."
- )
-
- for key in keys:
- if key.startswith(unet_key):
- unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key)
-
- new_checkpoint = {}
-
- new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"]
- new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"]
- new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"]
- new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"]
-
- if config["class_embed_type"] is None:
- # No parameters to port
- ...
- elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection":
- new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"]
- new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"]
- new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"]
- new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"]
- else:
- raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}")
-
- if config["addition_embed_type"] == "text_time":
- new_checkpoint["add_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"]
- new_checkpoint["add_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"]
- new_checkpoint["add_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"]
- new_checkpoint["add_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"]
-
- new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"]
- new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"]
-
- if not controlnet:
- new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"]
- new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"]
- new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"]
- new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"]
-
- # Retrieves the keys for the input blocks only
- num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer})
- input_blocks = {
- layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key]
- for layer_id in range(num_input_blocks)
- }
-
- # Retrieves the keys for the middle blocks only
- num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer})
- middle_blocks = {
- layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key]
- for layer_id in range(num_middle_blocks)
- }
-
- # Retrieves the keys for the output blocks only
- num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer})
- output_blocks = {
- layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key]
- for layer_id in range(num_output_blocks)
- }
-
- for i in range(1, num_input_blocks):
- block_id = (i - 1) // (config["layers_per_block"] + 1)
- layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1)
-
- resnets = [
- key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key
- ]
- attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
-
- if f"input_blocks.{i}.0.op.weight" in unet_state_dict:
- new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop(
- f"input_blocks.{i}.0.op.weight"
- )
- new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop(
- f"input_blocks.{i}.0.op.bias"
- )
-
- paths = renew_resnet_paths(resnets)
- meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
- assign_to_checkpoint(
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
-
- if len(attentions):
- paths = renew_attention_paths(attentions)
- meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"}
- assign_to_checkpoint(
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
-
- resnet_0 = middle_blocks[0]
- attentions = middle_blocks[1]
- resnet_1 = middle_blocks[2]
-
- resnet_0_paths = renew_resnet_paths(resnet_0)
- assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)
-
- resnet_1_paths = renew_resnet_paths(resnet_1)
- assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)
-
- attentions_paths = renew_attention_paths(attentions)
- meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"}
- assign_to_checkpoint(
- attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
-
- for i in range(num_output_blocks):
- block_id = i // (config["layers_per_block"] + 1)
- layer_in_block_id = i % (config["layers_per_block"] + 1)
- output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
- output_block_list = {}
-
- for layer in output_block_layers:
- layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
- if layer_id in output_block_list:
- output_block_list[layer_id].append(layer_name)
- else:
- output_block_list[layer_id] = [layer_name]
-
- if len(output_block_list) > 1:
- resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
- attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
-
- paths = renew_resnet_paths(resnets)
-
- meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
- assign_to_checkpoint(
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
-
- output_block_list = {k: sorted(v) for k, v in output_block_list.items()}
- if ["conv.bias", "conv.weight"] in output_block_list.values():
- index = list(output_block_list.values()).index(["conv.bias", "conv.weight"])
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[
- f"output_blocks.{i}.{index}.conv.weight"
- ]
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[
- f"output_blocks.{i}.{index}.conv.bias"
- ]
-
- # Clear attentions as they have been attributed above.
- if len(attentions) == 2:
- attentions = []
-
- if len(attentions):
- paths = renew_attention_paths(attentions)
- meta_path = {
- "old": f"output_blocks.{i}.1",
- "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
- }
- assign_to_checkpoint(
- paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config
- )
- else:
- resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
- for path in resnet_0_paths:
- old_path = ".".join(["output_blocks", str(i), path["old"]])
- new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
-
- new_checkpoint[new_path] = unet_state_dict[old_path]
-
- if controlnet:
- # conditioning embedding
-
- orig_index = 0
-
- new_checkpoint["controlnet_cond_embedding.conv_in.weight"] = unet_state_dict.pop(
- f"input_hint_block.{orig_index}.weight"
- )
- new_checkpoint["controlnet_cond_embedding.conv_in.bias"] = unet_state_dict.pop(
- f"input_hint_block.{orig_index}.bias"
- )
-
- orig_index += 2
-
- diffusers_index = 0
-
- while diffusers_index < 6:
- new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.weight"] = unet_state_dict.pop(
- f"input_hint_block.{orig_index}.weight"
- )
- new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.bias"] = unet_state_dict.pop(
- f"input_hint_block.{orig_index}.bias"
- )
- diffusers_index += 1
- orig_index += 2
-
- new_checkpoint["controlnet_cond_embedding.conv_out.weight"] = unet_state_dict.pop(
- f"input_hint_block.{orig_index}.weight"
- )
- new_checkpoint["controlnet_cond_embedding.conv_out.bias"] = unet_state_dict.pop(
- f"input_hint_block.{orig_index}.bias"
- )
-
- # down blocks
- for i in range(num_input_blocks):
- new_checkpoint[f"controlnet_down_blocks.{i}.weight"] = unet_state_dict.pop(f"zero_convs.{i}.0.weight")
- new_checkpoint[f"controlnet_down_blocks.{i}.bias"] = unet_state_dict.pop(f"zero_convs.{i}.0.bias")
-
- # mid block
- new_checkpoint["controlnet_mid_block.weight"] = unet_state_dict.pop("middle_block_out.0.weight")
- new_checkpoint["controlnet_mid_block.bias"] = unet_state_dict.pop("middle_block_out.0.bias")
-
- return new_checkpoint
-
-
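As a side note on the index arithmetic used above for the down blocks, here is a minimal, illustrative sketch (the `layers_per_block` value and block count are hypothetical, chosen to match the usual SD v1 UNet layout) that simply prints how original `input_blocks` indices map onto diffusers `down_blocks` slots:

# Illustrative only: reproduce the block_id / layer_in_block_id arithmetic
# from the UNet conversion above for a hypothetical layers_per_block = 2.
layers_per_block = 2
for i in range(1, 12):
    block_id = (i - 1) // (layers_per_block + 1)
    layer_in_block_id = (i - 1) % (layers_per_block + 1)
    # a slot index equal to layers_per_block corresponds to the downsampler ("op") layer
    print(f"input_blocks.{i} -> down_blocks.{block_id}, slot {layer_in_block_id}")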
-def convert_ldm_vae_checkpoint(checkpoint, config):
- # extract state dict for VAE
- vae_state_dict = {}
- keys = list(checkpoint.keys())
- vae_key = "first_stage_model." if any(k.startswith("first_stage_model.") for k in keys) else ""
- for key in keys:
- if key.startswith(vae_key):
- vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key)
-
- new_checkpoint = {}
-
- new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
- new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
- new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
- new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
- new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
- new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
-
- new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
- new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
- new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
- new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
- new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
- new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
-
- new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
- new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
- new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
- new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
-
- # Retrieves the keys for the encoder down blocks only
- num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
- down_blocks = {
- layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
- }
-
- # Retrieves the keys for the decoder up blocks only
- num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
- up_blocks = {
- layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
- }
-
- for i in range(num_down_blocks):
- resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
-
- if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
- new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
- f"encoder.down.{i}.downsample.conv.weight"
- )
- new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
- f"encoder.down.{i}.downsample.conv.bias"
- )
-
- paths = renew_vae_resnet_paths(resnets)
- meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
- assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
-
- mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
- num_mid_res_blocks = 2
- for i in range(1, num_mid_res_blocks + 1):
- resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
-
- paths = renew_vae_resnet_paths(resnets)
- meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
- assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
-
- mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
- paths = renew_vae_attention_paths(mid_attentions)
- meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
- assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
- conv_attn_to_linear(new_checkpoint)
-
- for i in range(num_up_blocks):
- block_id = num_up_blocks - 1 - i
- resnets = [
- key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
- ]
-
- if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
- new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
- f"decoder.up.{block_id}.upsample.conv.weight"
- ]
- new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
- f"decoder.up.{block_id}.upsample.conv.bias"
- ]
-
- paths = renew_vae_resnet_paths(resnets)
- meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
- assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
-
- mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
- num_mid_res_blocks = 2
- for i in range(1, num_mid_res_blocks + 1):
- resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
-
- paths = renew_vae_resnet_paths(resnets)
- meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
- assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
-
- mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
- paths = renew_vae_attention_paths(mid_attentions)
- meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
- assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
- conv_attn_to_linear(new_checkpoint)
- return new_checkpoint
-
-
-def convert_ldm_bert_checkpoint(checkpoint, config):
- def _copy_attn_layer(hf_attn_layer, pt_attn_layer):
- hf_attn_layer.q_proj.weight.data = pt_attn_layer.to_q.weight
- hf_attn_layer.k_proj.weight.data = pt_attn_layer.to_k.weight
- hf_attn_layer.v_proj.weight.data = pt_attn_layer.to_v.weight
-
- hf_attn_layer.out_proj.weight = pt_attn_layer.to_out.weight
- hf_attn_layer.out_proj.bias = pt_attn_layer.to_out.bias
-
- def _copy_linear(hf_linear, pt_linear):
- hf_linear.weight = pt_linear.weight
- hf_linear.bias = pt_linear.bias
-
- def _copy_layer(hf_layer, pt_layer):
- # copy layer norms
- _copy_linear(hf_layer.self_attn_layer_norm, pt_layer[0][0])
- _copy_linear(hf_layer.final_layer_norm, pt_layer[1][0])
-
- # copy attn
- _copy_attn_layer(hf_layer.self_attn, pt_layer[0][1])
-
- # copy MLP
- pt_mlp = pt_layer[1][1]
- _copy_linear(hf_layer.fc1, pt_mlp.net[0][0])
- _copy_linear(hf_layer.fc2, pt_mlp.net[2])
-
- def _copy_layers(hf_layers, pt_layers):
- for i, hf_layer in enumerate(hf_layers):
- if i != 0:
- i += i  # each HF layer consumes two pt sub-layers, so HF layer i starts at pt index 2 * i
- pt_layer = pt_layers[i : i + 2]
- _copy_layer(hf_layer, pt_layer)
-
- hf_model = LDMBertModel(config).eval()
-
- # copy embeds
- hf_model.model.embed_tokens.weight = checkpoint.transformer.token_emb.weight
- hf_model.model.embed_positions.weight.data = checkpoint.transformer.pos_emb.emb.weight
-
- # copy layer norm
- _copy_linear(hf_model.model.layer_norm, checkpoint.transformer.norm)
-
- # copy hidden layers
- _copy_layers(hf_model.model.layers, checkpoint.transformer.attn_layers.layers)
-
- _copy_linear(hf_model.to_logits, checkpoint.transformer.to_logits)
-
- return hf_model
-
-
-def convert_ldm_clip_checkpoint(checkpoint, local_files_only=False, text_encoder=None):
- if text_encoder is None:
- config_name = "openai/clip-vit-large-patch14"
- config = CLIPTextConfig.from_pretrained(config_name)
-
- ctx = init_empty_weights if is_accelerate_available() else nullcontext
- with ctx():
- text_model = CLIPTextModel(config)
-
- keys = list(checkpoint.keys())
-
- text_model_dict = {}
-
- remove_prefixes = ["cond_stage_model.transformer", "conditioner.embedders.0.transformer"]
-
- for key in keys:
- for prefix in remove_prefixes:
- if key.startswith(prefix):
- text_model_dict[key[len(prefix + ".") :]] = checkpoint[key]
-
- if is_accelerate_available():
- for param_name, param in text_model_dict.items():
- set_module_tensor_to_device(text_model, param_name, "cpu", value=param)
- else:
- if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings, "position_ids")):
- text_model_dict.pop("text_model.embeddings.position_ids", None)
-
- text_model.load_state_dict(text_model_dict)
-
- return text_model
-
-
-textenc_conversion_lst = [
- ("positional_embedding", "text_model.embeddings.position_embedding.weight"),
- ("token_embedding.weight", "text_model.embeddings.token_embedding.weight"),
- ("ln_final.weight", "text_model.final_layer_norm.weight"),
- ("ln_final.bias", "text_model.final_layer_norm.bias"),
- ("text_projection", "text_projection.weight"),
-]
-textenc_conversion_map = {x[0]: x[1] for x in textenc_conversion_lst}
-
-textenc_transformer_conversion_lst = [
- # (stable-diffusion, HF Diffusers)
- ("resblocks.", "text_model.encoder.layers."),
- ("ln_1", "layer_norm1"),
- ("ln_2", "layer_norm2"),
- (".c_fc.", ".fc1."),
- (".c_proj.", ".fc2."),
- (".attn", ".self_attn"),
- ("ln_final.", "transformer.text_model.final_layer_norm."),
- ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
- ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
-]
-protected = {re.escape(x[0]): x[1] for x in textenc_transformer_conversion_lst}
-textenc_pattern = re.compile("|".join(protected.keys()))
-
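As a quick illustration of how the conversion table above is applied, here is a self-contained sketch; the sample key is hypothetical and the table is trimmed to three entries:

import re

# Same construction as above, repeated here so the snippet runs standalone.
textenc_transformer_conversion_lst = [
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    (".attn", ".self_attn"),
]
protected = {re.escape(old): new for old, new in textenc_transformer_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

old_key = "resblocks.3.ln_1.weight"  # hypothetical original OpenCLIP key
new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], old_key)
print(new_key)  # text_model.encoder.layers.3.layer_norm1.weight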
-
-def convert_paint_by_example_checkpoint(checkpoint):
- config = CLIPVisionConfig.from_pretrained("openai/clip-vit-large-patch14")
- model = PaintByExampleImageEncoder(config)
-
- keys = list(checkpoint.keys())
-
- text_model_dict = {}
-
- for key in keys:
- if key.startswith("cond_stage_model.transformer"):
- text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[key]
-
- # load clip vision
- model.model.load_state_dict(text_model_dict)
-
- # load mapper
- keys_mapper = {
- k[len("cond_stage_model.mapper.res") :]: v
- for k, v in checkpoint.items()
- if k.startswith("cond_stage_model.mapper")
- }
-
- MAPPING = {
- "attn.c_qkv": ["attn1.to_q", "attn1.to_k", "attn1.to_v"],
- "attn.c_proj": ["attn1.to_out.0"],
- "ln_1": ["norm1"],
- "ln_2": ["norm3"],
- "mlp.c_fc": ["ff.net.0.proj"],
- "mlp.c_proj": ["ff.net.2"],
- }
-
- mapped_weights = {}
- for key, value in keys_mapper.items():
- prefix = key[: len("blocks.i")]
- suffix = key.split(prefix)[-1].split(".")[-1]
- name = key.split(prefix)[-1].split(suffix)[0][1:-1]
- mapped_names = MAPPING[name]
-
- num_splits = len(mapped_names)
- for i, mapped_name in enumerate(mapped_names):
- new_name = ".".join([prefix, mapped_name, suffix])
- shape = value.shape[0] // num_splits
- mapped_weights[new_name] = value[i * shape : (i + 1) * shape]
-
- model.mapper.load_state_dict(mapped_weights)
-
- # load final layer norm
- model.final_layer_norm.load_state_dict(
- {
- "bias": checkpoint["cond_stage_model.final_ln.bias"],
- "weight": checkpoint["cond_stage_model.final_ln.weight"],
- }
- )
-
- # load final proj
- model.proj_out.load_state_dict(
- {
- "bias": checkpoint["proj_out.bias"],
- "weight": checkpoint["proj_out.weight"],
- }
- )
-
- # load uncond vector
- model.uncond_vector.data = torch.nn.Parameter(checkpoint["learnable_vector"])
- return model
-
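The mapper conversion above splits fused `attn.c_qkv` tensors into separate query/key/value chunks. A small standalone sketch of that split (shapes here are hypothetical):

import torch

# Sketch of the qkv split performed by the mapper conversion above: a fused
# c_qkv weight of shape (3 * dim, dim) is cut into equal chunks for to_q, to_k, to_v.
dim = 32
fused_qkv = torch.randn(3 * dim, dim)

mapped_names = ["attn1.to_q", "attn1.to_k", "attn1.to_v"]
chunk = fused_qkv.shape[0] // len(mapped_names)
split_weights = {
    name: fused_qkv[i * chunk : (i + 1) * chunk]
    for i, name in enumerate(mapped_names)
}
assert all(w.shape == (dim, dim) for w in split_weights.values())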
-
-def convert_open_clip_checkpoint(
- checkpoint, config_name, prefix="cond_stage_model.model.", has_projection=False, **config_kwargs
-):
- # text_model = CLIPTextModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="text_encoder")
- # text_model = CLIPTextModelWithProjection.from_pretrained(
- # "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", projection_dim=1280
- # )
- config = CLIPTextConfig.from_pretrained(config_name, **config_kwargs)
-
- ctx = init_empty_weights if is_accelerate_available() else nullcontext
- with ctx():
- text_model = CLIPTextModelWithProjection(config) if has_projection else CLIPTextModel(config)
-
- keys = list(checkpoint.keys())
-
- keys_to_ignore = []
- if config_name == "stabilityai/stable-diffusion-2" and config.num_hidden_layers == 23:
- # make sure to remove all keys > 22
- keys_to_ignore += [k for k in keys if k.startswith("cond_stage_model.model.transformer.resblocks.23")]
- keys_to_ignore += ["cond_stage_model.model.text_projection"]
-
- text_model_dict = {}
-
- if prefix + "text_projection" in checkpoint:
- d_model = int(checkpoint[prefix + "text_projection"].shape[0])
- else:
- d_model = 1024
-
- text_model_dict["text_model.embeddings.position_ids"] = text_model.text_model.embeddings.get_buffer("position_ids")
-
- for key in keys:
- if key in keys_to_ignore:
- continue
- if key[len(prefix) :] in textenc_conversion_map:
- if key.endswith("text_projection"):
- value = checkpoint[key].T.contiguous()
- else:
- value = checkpoint[key]
-
- text_model_dict[textenc_conversion_map[key[len(prefix) :]]] = value
-
- if key.startswith(prefix + "transformer."):
- new_key = key[len(prefix + "transformer.") :]
- if new_key.endswith(".in_proj_weight"):
- new_key = new_key[: -len(".in_proj_weight")]
- new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)
- text_model_dict[new_key + ".q_proj.weight"] = checkpoint[key][:d_model, :]
- text_model_dict[new_key + ".k_proj.weight"] = checkpoint[key][d_model : d_model * 2, :]
- text_model_dict[new_key + ".v_proj.weight"] = checkpoint[key][d_model * 2 :, :]
- elif new_key.endswith(".in_proj_bias"):
- new_key = new_key[: -len(".in_proj_bias")]
- new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)
- text_model_dict[new_key + ".q_proj.bias"] = checkpoint[key][:d_model]
- text_model_dict[new_key + ".k_proj.bias"] = checkpoint[key][d_model : d_model * 2]
- text_model_dict[new_key + ".v_proj.bias"] = checkpoint[key][d_model * 2 :]
- else:
- new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key)
-
- text_model_dict[new_key] = checkpoint[key]
-
- if is_accelerate_available():
- for param_name, param in text_model_dict.items():
- set_module_tensor_to_device(text_model, param_name, "cpu", value=param)
- else:
- if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings, "position_ids")):
- text_model_dict.pop("text_model.embeddings.position_ids", None)
-
- text_model.load_state_dict(text_model_dict)
-
- return text_model
-
-
-def stable_unclip_image_encoder(original_config):
- """
- Returns the image processor and CLIP image encoder for the img2img unCLIP pipeline.
-
- Two variants of stable unCLIP models are currently known; one uses the CLIP image encoder and the other
- the OpenCLIP image encoder.
- """
-
- image_embedder_config = original_config.model.params.embedder_config
-
- sd_clip_image_embedder_class = image_embedder_config.target
- sd_clip_image_embedder_class = sd_clip_image_embedder_class.split(".")[-1]
-
- if sd_clip_image_embedder_class == "ClipImageEmbedder":
- clip_model_name = image_embedder_config.params.model
-
- if clip_model_name == "ViT-L/14":
- feature_extractor = CLIPImageProcessor()
- image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
- else:
- raise NotImplementedError(f"Unknown CLIP checkpoint name in stable diffusion checkpoint {clip_model_name}")
-
- elif sd_clip_image_embedder_class == "FrozenOpenCLIPImageEmbedder":
- feature_extractor = CLIPImageProcessor()
- image_encoder = CLIPVisionModelWithProjection.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K")
- else:
- raise NotImplementedError(
- f"Unknown CLIP image embedder class in stable diffusion checkpoint {sd_clip_image_embedder_class}"
- )
-
- return feature_extractor, image_encoder
-
-
-def stable_unclip_image_noising_components(
- original_config, clip_stats_path: Optional[str] = None, device: Optional[str] = None
-):
- """
- Returns the noising components for the img2img and txt2img unclip pipelines.
-
- Converts the stability noise augmentor into
- 1. a `StableUnCLIPImageNormalizer` for holding the CLIP stats
- 2. a `DDPMScheduler` for holding the noise schedule
-
- If the noise augmentor config specifies a clip stats path, the `clip_stats_path` must be provided.
- """
- noise_aug_config = original_config.model.params.noise_aug_config
- noise_aug_class = noise_aug_config.target
- noise_aug_class = noise_aug_class.split(".")[-1]
-
- if noise_aug_class == "CLIPEmbeddingNoiseAugmentation":
- noise_aug_config = noise_aug_config.params
- embedding_dim = noise_aug_config.timestep_dim
- max_noise_level = noise_aug_config.noise_schedule_config.timesteps
- beta_schedule = noise_aug_config.noise_schedule_config.beta_schedule
-
- image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedding_dim)
- image_noising_scheduler = DDPMScheduler(num_train_timesteps=max_noise_level, beta_schedule=beta_schedule)
-
- if "clip_stats_path" in noise_aug_config:
- if clip_stats_path is None:
- raise ValueError("This stable unclip config requires a `clip_stats_path`")
-
- clip_mean, clip_std = torch.load(clip_stats_path, map_location=device)
- clip_mean = clip_mean[None, :]
- clip_std = clip_std[None, :]
-
- clip_stats_state_dict = {
- "mean": clip_mean,
- "std": clip_std,
- }
-
- image_normalizer.load_state_dict(clip_stats_state_dict)
- else:
- raise NotImplementedError(f"Unknown noise augmentor class: {noise_aug_class}")
-
- return image_normalizer, image_noising_scheduler
-
-
-def convert_controlnet_checkpoint(
- checkpoint,
- original_config,
- checkpoint_path,
- image_size,
- upcast_attention,
- extract_ema,
- use_linear_projection=None,
- cross_attention_dim=None,
-):
- ctrlnet_config = create_unet_diffusers_config(original_config, image_size=image_size, controlnet=True)
- ctrlnet_config["upcast_attention"] = upcast_attention
-
- ctrlnet_config.pop("sample_size")
-
- if use_linear_projection is not None:
- ctrlnet_config["use_linear_projection"] = use_linear_projection
-
- if cross_attention_dim is not None:
- ctrlnet_config["cross_attention_dim"] = cross_attention_dim
-
- controlnet = ControlNetModel(**ctrlnet_config)
-
- # Some controlnet ckpt files are distributed independently of the rest of the
- # model components, e.g. https://huggingface.co/thibaud/controlnet-sd21/
- if "time_embed.0.weight" in checkpoint:
- skip_extract_state_dict = True
- else:
- skip_extract_state_dict = False
-
- converted_ctrl_checkpoint = convert_ldm_unet_checkpoint(
- checkpoint,
- ctrlnet_config,
- path=checkpoint_path,
- extract_ema=extract_ema,
- controlnet=True,
- skip_extract_state_dict=skip_extract_state_dict,
- )
-
- controlnet.load_state_dict(converted_ctrl_checkpoint)
-
- return controlnet
-
-
-def download_from_original_stable_diffusion_ckpt(
- checkpoint_path: str,
- original_config_file: str = None,
- image_size: Optional[int] = None,
- prediction_type: str = None,
- model_type: str = None,
- extract_ema: bool = False,
- scheduler_type: str = "pndm",
- num_in_channels: Optional[int] = None,
- upcast_attention: Optional[bool] = None,
- device: str = None,
- from_safetensors: bool = False,
- stable_unclip: Optional[str] = None,
- stable_unclip_prior: Optional[str] = None,
- clip_stats_path: Optional[str] = None,
- controlnet: Optional[bool] = None,
- load_safety_checker: bool = True,
- pipeline_class: DiffusionPipeline = None,
- local_files_only=False,
- vae_path=None,
- vae=None,
- text_encoder=None,
- tokenizer=None,
-) -> DiffusionPipeline:
- """
- Load a Stable Diffusion pipeline object from a CompVis-style `.ckpt`/`.safetensors` file and (ideally) a `.yaml`
- config file.
-
- Although many of the arguments can be automatically inferred, some of these rely on brittle checks against the
- global step count, which will likely fail for models that have undergone further fine-tuning. Therefore, it is
- recommended that you override the default values and/or supply an `original_config_file` wherever possible.
-
- Args:
- checkpoint_path (`str`): Path to `.ckpt` file.
- original_config_file (`str`):
- Path to `.yaml` config file corresponding to the original architecture. If `None`, will be automatically
- inferred by looking for a key that only exists in SD2.0 models.
- image_size (`int`, *optional*, defaults to 512):
- The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2
- Base. Use 768 for Stable Diffusion v2.
- prediction_type (`str`, *optional*):
- The prediction type that the model was trained on. Use `'epsilon'` for Stable Diffusion v1.X and Stable
- Diffusion v2 Base. Use `'v_prediction'` for Stable Diffusion v2.
- num_in_channels (`int`, *optional*, defaults to None):
- The number of input channels. If `None`, it will be automatically inferred.
- scheduler_type (`str`, *optional*, defaults to 'pndm'):
- Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm",
- "ddim"]`.
- model_type (`str`, *optional*, defaults to `None`):
- The pipeline type. `None` to automatically infer, or one of `["FrozenOpenCLIPEmbedder",
- "FrozenCLIPEmbedder", "PaintByExample"]`.
- extract_ema (`bool`, *optional*, defaults to `False`): Only relevant for
- checkpoints that have both EMA and non-EMA weights. Pass `True` to extract the EMA weights, which
- usually yield higher-quality images for inference; non-EMA weights are usually better for continued
- fine-tuning.
- upcast_attention (`bool`, *optional*, defaults to `None`):
- Whether the attention computation should always be upcasted. This is necessary when running stable
- diffusion 2.1.
- device (`str`, *optional*, defaults to `None`):
- The device to use. Pass `None` to determine automatically.
- from_safetensors (`bool`, *optional*, defaults to `False`):
- If `checkpoint_path` is in `safetensors` format, load the checkpoint with safetensors instead of PyTorch.
- load_safety_checker (`bool`, *optional*, defaults to `True`):
- Whether to load the safety checker or not. Defaults to `True`.
- pipeline_class (`DiffusionPipeline`, *optional*, defaults to `None`):
- The pipeline class to use. Pass `None` to determine it automatically.
- local_files_only (`bool`, *optional*, defaults to `False`):
- Whether or not to only look at local files (i.e., do not try to download the model).
- vae (`AutoencoderKL`, *optional*, defaults to `None`):
- Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. If
- this parameter is `None`, the function will load a new instance of `AutoencoderKL` by itself, if needed.
- text_encoder (`CLIPTextModel`, *optional*, defaults to `None`):
- An instance of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel)
- to use, specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)
- variant. If this parameter is `None`, the function will load a new instance of `CLIPTextModel` by itself, if needed.
- tokenizer (`CLIPTokenizer`, *optional*, defaults to `None`):
- An instance of
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer)
- to use. If this parameter is `None`, the function will load a new instance of [CLIPTokenizer] by itself, if
- needed.
- return: A `DiffusionPipeline` object (the concrete class depends on the detected model type) representing the passed-in `.ckpt`/`.safetensors` file.
- """
-
- # import pipelines here to avoid circular import error when using from_single_file method
- from diffusers import (
- LDMTextToImagePipeline,
- PaintByExamplePipeline,
- StableDiffusionControlNetPipeline,
- StableDiffusionInpaintPipeline,
- StableDiffusionPipeline,
- StableDiffusionXLImg2ImgPipeline,
- StableDiffusionXLPipeline,
- StableUnCLIPImg2ImgPipeline,
- StableUnCLIPPipeline,
- )
-
- if pipeline_class is None:
- pipeline_class = StableDiffusionPipeline if not controlnet else StableDiffusionControlNetPipeline
-
- if prediction_type == "v-prediction":
- prediction_type = "v_prediction"
-
- if not is_omegaconf_available():
- raise ValueError(BACKENDS_MAPPING["omegaconf"][1])
-
- from omegaconf import OmegaConf
-
- if from_safetensors:
- if not is_safetensors_available():
- raise ValueError(BACKENDS_MAPPING["safetensors"][1])
-
- from safetensors.torch import load_file as safe_load
-
- checkpoint = safe_load(checkpoint_path, device="cpu")
- else:
- if device is None:
- device = "cuda" if torch.cuda.is_available() else "cpu"
- checkpoint = torch.load(checkpoint_path, map_location=device)
- else:
- checkpoint = torch.load(checkpoint_path, map_location=device)
-
- # Sometimes models don't have the global_step item
- if "global_step" in checkpoint:
- global_step = checkpoint["global_step"]
- else:
- logger.debug("global_step key not found in model")
- global_step = None
-
- # NOTE: this while loop isn't great, but some controlnet checkpoints nest the weights under an
- # additional "state_dict" key, e.g. https://huggingface.co/thibaud/controlnet-canny-sd21
- while "state_dict" in checkpoint:
- checkpoint = checkpoint["state_dict"]
-
- if original_config_file is None:
- key_name_v2_1 = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight"
- key_name_sd_xl_base = "conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias"
- key_name_sd_xl_refiner = "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias"
-
- # model_type = "v1"
- config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
-
- if key_name_v2_1 in checkpoint and checkpoint[key_name_v2_1].shape[-1] == 1024:
- # model_type = "v2"
- config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml"
-
- if global_step == 110000:
- # v2.1 needs to upcast attention
- upcast_attention = True
- elif key_name_sd_xl_base in checkpoint:
- # only base xl has two text embedders
- config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml"
- elif key_name_sd_xl_refiner in checkpoint:
- # the refiner xl has only one text embedder
- config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml"
-
- original_config_file = BytesIO(requests.get(config_url).content)
-
- original_config = OmegaConf.load(original_config_file)
-
- # Convert the text model.
- if (
- model_type is None
- and "cond_stage_config" in original_config.model.params
- and original_config.model.params.cond_stage_config is not None
- ):
- model_type = original_config.model.params.cond_stage_config.target.split(".")[-1]
- logger.debug(f"no `model_type` given, `model_type` inferred as: {model_type}")
- elif model_type is None and original_config.model.params.network_config is not None:
- if original_config.model.params.network_config.params.context_dim == 2048:
- model_type = "SDXL"
- else:
- model_type = "SDXL-Refiner"
- if image_size is None:
- image_size = 1024
-
- if num_in_channels is None and pipeline_class == StableDiffusionInpaintPipeline:
- num_in_channels = 9
- elif num_in_channels is None:
- num_in_channels = 4
-
- if "unet_config" in original_config.model.params:
- original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels
-
- if (
- "parameterization" in original_config["model"]["params"]
- and original_config["model"]["params"]["parameterization"] == "v"
- ):
- if prediction_type is None:
- # NOTE: for Stable Diffusion 2 Base it is recommended to pass `prediction_type="epsilon"`
- # explicitly, since the inference here relies on a brittle global-step check
- prediction_type = "epsilon" if global_step == 875000 else "v_prediction"
- if image_size is None:
- # NOTE: for Stable Diffusion 2 Base one has to pass `image_size=512`
- # explicitly, since the inference here relies on a brittle global-step check
- image_size = 512 if global_step == 875000 else 768
- else:
- if prediction_type is None:
- prediction_type = "epsilon"
- if image_size is None:
- image_size = 512
-
- if controlnet is None and "control_stage_config" in original_config.model.params:
- controlnet = convert_controlnet_checkpoint(
- checkpoint, original_config, checkpoint_path, image_size, upcast_attention, extract_ema
- )
-
- num_train_timesteps = getattr(original_config.model.params, "timesteps", None) or 1000
-
- if model_type in ["SDXL", "SDXL-Refiner"]:
- scheduler_dict = {
- "beta_schedule": "scaled_linear",
- "beta_start": 0.00085,
- "beta_end": 0.012,
- "interpolation_type": "linear",
- "num_train_timesteps": num_train_timesteps,
- "prediction_type": "epsilon",
- "sample_max_value": 1.0,
- "set_alpha_to_one": False,
- "skip_prk_steps": True,
- "steps_offset": 1,
- "timestep_spacing": "leading",
- }
- scheduler = EulerDiscreteScheduler.from_config(scheduler_dict)
- scheduler_type = "euler"
- else:
- beta_start = getattr(original_config.model.params, "linear_start", None) or 0.02
- beta_end = getattr(original_config.model.params, "linear_end", None) or 0.085
- scheduler = DDIMScheduler(
- beta_end=beta_end,
- beta_schedule="scaled_linear",
- beta_start=beta_start,
- num_train_timesteps=num_train_timesteps,
- steps_offset=1,
- clip_sample=False,
- set_alpha_to_one=False,
- prediction_type=prediction_type,
- )
- # make sure scheduler works correctly with DDIM
- scheduler.register_to_config(clip_sample=False)
-
- if scheduler_type == "pndm":
- config = dict(scheduler.config)
- config["skip_prk_steps"] = True
- scheduler = PNDMScheduler.from_config(config)
- elif scheduler_type == "lms":
- scheduler = LMSDiscreteScheduler.from_config(scheduler.config)
- elif scheduler_type == "heun":
- scheduler = HeunDiscreteScheduler.from_config(scheduler.config)
- elif scheduler_type == "euler":
- scheduler = EulerDiscreteScheduler.from_config(scheduler.config)
- elif scheduler_type == "euler-ancestral":
- scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config)
- elif scheduler_type == "dpm":
- scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
- elif scheduler_type == "ddim":
- scheduler = scheduler
- else:
- raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!")
-
- # Convert the UNet2DConditionModel model.
- unet_config = create_unet_diffusers_config(original_config, image_size=image_size)
- unet_config["upcast_attention"] = upcast_attention
- converted_unet_checkpoint = convert_ldm_unet_checkpoint(
- checkpoint, unet_config, path=checkpoint_path, extract_ema=extract_ema
- )
-
- ctx = init_empty_weights if is_accelerate_available() else nullcontext
- with ctx():
- unet = UNet2DConditionModel(**unet_config)
-
- if is_accelerate_available():
- for param_name, param in converted_unet_checkpoint.items():
- set_module_tensor_to_device(unet, param_name, "cpu", value=param)
- else:
- unet.load_state_dict(converted_unet_checkpoint)
-
- # Convert the VAE model.
- if vae_path is None and vae is None:
- vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
- converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config)
-
- if (
- "model" in original_config
- and "params" in original_config.model
- and "scale_factor" in original_config.model.params
- ):
- vae_scaling_factor = original_config.model.params.scale_factor
- else:
- vae_scaling_factor = 0.18215 # default SD scaling factor
-
- vae_config["scaling_factor"] = vae_scaling_factor
-
- ctx = init_empty_weights if is_accelerate_available() else nullcontext
- with ctx():
- vae = AutoencoderKL(**vae_config)
-
- if is_accelerate_available():
- for param_name, param in converted_vae_checkpoint.items():
- set_module_tensor_to_device(vae, param_name, "cpu", value=param)
- else:
- vae.load_state_dict(converted_vae_checkpoint)
- elif vae is None:
- vae = AutoencoderKL.from_pretrained(vae_path)
-
- if model_type == "FrozenOpenCLIPEmbedder":
- config_name = "stabilityai/stable-diffusion-2"
- config_kwargs = {"subfolder": "text_encoder"}
-
- text_model = convert_open_clip_checkpoint(checkpoint, config_name, **config_kwargs)
- tokenizer = CLIPTokenizer.from_pretrained("stabilityai/stable-diffusion-2", subfolder="tokenizer")
-
- if stable_unclip is None:
- if controlnet:
- pipe = pipeline_class(
- vae=vae,
- text_encoder=text_model,
- tokenizer=tokenizer,
- unet=unet,
- scheduler=scheduler,
- controlnet=controlnet,
- safety_checker=None,
- feature_extractor=None,
- requires_safety_checker=False,
- )
- else:
- pipe = pipeline_class(
- vae=vae,
- text_encoder=text_model,
- tokenizer=tokenizer,
- unet=unet,
- scheduler=scheduler,
- safety_checker=None,
- feature_extractor=None,
- requires_safety_checker=False,
- )
- else:
- image_normalizer, image_noising_scheduler = stable_unclip_image_noising_components(
- original_config, clip_stats_path=clip_stats_path, device=device
- )
-
- if stable_unclip == "img2img":
- feature_extractor, image_encoder = stable_unclip_image_encoder(original_config)
-
- pipe = StableUnCLIPImg2ImgPipeline(
- # image encoding components
- feature_extractor=feature_extractor,
- image_encoder=image_encoder,
- # image noising components
- image_normalizer=image_normalizer,
- image_noising_scheduler=image_noising_scheduler,
- # regular denoising components
- tokenizer=tokenizer,
- text_encoder=text_model,
- unet=unet,
- scheduler=scheduler,
- # vae
- vae=vae,
- )
- elif stable_unclip == "txt2img":
- if stable_unclip_prior is None or stable_unclip_prior == "karlo":
- karlo_model = "kakaobrain/karlo-v1-alpha"
- prior = PriorTransformer.from_pretrained(karlo_model, subfolder="prior")
-
- prior_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
- prior_text_model = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
-
- prior_scheduler = UnCLIPScheduler.from_pretrained(karlo_model, subfolder="prior_scheduler")
- prior_scheduler = DDPMScheduler.from_config(prior_scheduler.config)
- else:
- raise NotImplementedError(f"unknown prior for stable unclip model: {stable_unclip_prior}")
-
- pipe = StableUnCLIPPipeline(
- # prior components
- prior_tokenizer=prior_tokenizer,
- prior_text_encoder=prior_text_model,
- prior=prior,
- prior_scheduler=prior_scheduler,
- # image noising components
- image_normalizer=image_normalizer,
- image_noising_scheduler=image_noising_scheduler,
- # regular denoising components
- tokenizer=tokenizer,
- text_encoder=text_model,
- unet=unet,
- scheduler=scheduler,
- # vae
- vae=vae,
- )
- else:
- raise NotImplementedError(f"unknown `stable_unclip` type: {stable_unclip}")
- elif model_type == "PaintByExample":
- vision_model = convert_paint_by_example_checkpoint(checkpoint)
- tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
- feature_extractor = AutoFeatureExtractor.from_pretrained("CompVis/stable-diffusion-safety-checker")
- pipe = PaintByExamplePipeline(
- vae=vae,
- image_encoder=vision_model,
- unet=unet,
- scheduler=scheduler,
- safety_checker=None,
- feature_extractor=feature_extractor,
- )
- elif model_type == "FrozenCLIPEmbedder":
- text_model = convert_ldm_clip_checkpoint(
- checkpoint, local_files_only=local_files_only, text_encoder=text_encoder
- )
- tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") if tokenizer is None else tokenizer
-
- if load_safety_checker:
- safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
- feature_extractor = AutoFeatureExtractor.from_pretrained("CompVis/stable-diffusion-safety-checker")
- else:
- safety_checker = None
- feature_extractor = None
-
- if controlnet:
- pipe = pipeline_class(
- vae=vae,
- text_encoder=text_model,
- tokenizer=tokenizer,
- unet=unet,
- controlnet=controlnet,
- scheduler=scheduler,
- safety_checker=safety_checker,
- feature_extractor=feature_extractor,
- )
- else:
- pipe = pipeline_class(
- vae=vae,
- text_encoder=text_model,
- tokenizer=tokenizer,
- unet=unet,
- scheduler=scheduler,
- safety_checker=safety_checker,
- feature_extractor=feature_extractor,
- )
- elif model_type in ["SDXL", "SDXL-Refiner"]:
- if model_type == "SDXL":
- tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
- text_encoder = convert_ldm_clip_checkpoint(checkpoint, local_files_only=local_files_only)
- tokenizer_2 = CLIPTokenizer.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", pad_token="!")
-
- config_name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
- config_kwargs = {"projection_dim": 1280}
- text_encoder_2 = convert_open_clip_checkpoint(
- checkpoint, config_name, prefix="conditioner.embedders.1.model.", has_projection=True, **config_kwargs
- )
-
- pipe = StableDiffusionXLPipeline(
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- text_encoder_2=text_encoder_2,
- tokenizer_2=tokenizer_2,
- unet=unet,
- scheduler=scheduler,
- force_zeros_for_empty_prompt=True,
- )
- else:
- tokenizer = None
- text_encoder = None
- tokenizer_2 = CLIPTokenizer.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", pad_token="!")
-
- config_name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k"
- config_kwargs = {"projection_dim": 1280}
- text_encoder_2 = convert_open_clip_checkpoint(
- checkpoint, config_name, prefix="conditioner.embedders.0.model.", has_projection=True, **config_kwargs
- )
-
- pipe = StableDiffusionXLImg2ImgPipeline(
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- text_encoder_2=text_encoder_2,
- tokenizer_2=tokenizer_2,
- unet=unet,
- scheduler=scheduler,
- requires_aesthetics_score=True,
- force_zeros_for_empty_prompt=False,
- )
- else:
- text_config = create_ldm_bert_config(original_config)
- text_model = convert_ldm_bert_checkpoint(checkpoint, text_config)
- tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
- pipe = LDMTextToImagePipeline(vqvae=vae, bert=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
-
- return pipe
-
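For orientation, a hedged usage sketch of the loader above; the checkpoint path and option values are hypothetical, and supplying an explicit `original_config_file` is recommended where one is available:

# Hypothetical example: convert a local SD v1.x checkpoint into a diffusers pipeline.
pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path="v1-5-pruned-emaonly.safetensors",  # hypothetical path
    from_safetensors=True,
    extract_ema=True,
    scheduler_type="ddim",
    load_safety_checker=False,
)
pipe.save_pretrained("sd15-diffusers")  # writes a standard diffusers folder layout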
-
-def download_controlnet_from_original_ckpt(
- checkpoint_path: str,
- original_config_file: str,
- image_size: int = 512,
- extract_ema: bool = False,
- num_in_channels: Optional[int] = None,
- upcast_attention: Optional[bool] = None,
- device: str = None,
- from_safetensors: bool = False,
- use_linear_projection: Optional[bool] = None,
- cross_attention_dim: Optional[int] = None,
-) -> ControlNetModel:
- if not is_omegaconf_available():
- raise ValueError(BACKENDS_MAPPING["omegaconf"][1])
-
- from omegaconf import OmegaConf
-
- if from_safetensors:
- if not is_safetensors_available():
- raise ValueError(BACKENDS_MAPPING["safetensors"][1])
-
- from safetensors import safe_open
-
- checkpoint = {}
- with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
- for key in f.keys():
- checkpoint[key] = f.get_tensor(key)
- else:
- if device is None:
- device = "cuda" if torch.cuda.is_available() else "cpu"
- checkpoint = torch.load(checkpoint_path, map_location=device)
- else:
- checkpoint = torch.load(checkpoint_path, map_location=device)
-
- # NOTE: this while loop isn't great, but some controlnet checkpoints nest the weights under an
- # additional "state_dict" key, e.g. https://huggingface.co/thibaud/controlnet-canny-sd21
- while "state_dict" in checkpoint:
- checkpoint = checkpoint["state_dict"]
-
- original_config = OmegaConf.load(original_config_file)
-
- if num_in_channels is not None:
- original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels
-
- if "control_stage_config" not in original_config.model.params:
- raise ValueError("`control_stage_config` not present in original config")
-
- controlnet = convert_controlnet_checkpoint(
- checkpoint,
- original_config,
- checkpoint_path,
- image_size,
- upcast_attention,
- extract_ema,
- use_linear_projection=use_linear_projection,
- cross_attention_dim=cross_attention_dim,
- )
-
- return controlnet
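A similar hedged sketch for the standalone ControlNet converter; the paths are hypothetical, and `original_config_file` is required here and must contain a `control_stage_config` entry:

# Hypothetical example: convert a standalone ControlNet checkpoint.
controlnet = download_controlnet_from_original_ckpt(
    checkpoint_path="control_v11p_sd15_canny.pth",        # hypothetical path
    original_config_file="control_v11p_sd15_canny.yaml",  # matching LDM-style config
    from_safetensors=False,
    extract_ema=False,
)
controlnet.save_pretrained("controlnet-canny-diffusers")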
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky.py
deleted file mode 100644
index 01b8a0f3eec1117ef7c84c228a9f46763df2140f..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky.py
+++ /dev/null
@@ -1,317 +0,0 @@
-# coding=utf-8
-# Copyright 2023 HuggingFace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import gc
-import random
-import unittest
-
-import numpy as np
-import torch
-from transformers import XLMRobertaTokenizerFast
-
-from diffusers import DDIMScheduler, KandinskyPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
-from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
-from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
-
-from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
-
-
-enable_full_determinism()
-
-
-class Dummies:
- @property
- def text_embedder_hidden_size(self):
- return 32
-
- @property
- def time_input_dim(self):
- return 32
-
- @property
- def block_out_channels_0(self):
- return self.time_input_dim
-
- @property
- def time_embed_dim(self):
- return self.time_input_dim * 4
-
- @property
- def cross_attention_dim(self):
- return 32
-
- @property
- def dummy_tokenizer(self):
- tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
- return tokenizer
-
- @property
- def dummy_text_encoder(self):
- torch.manual_seed(0)
- config = MCLIPConfig(
- numDims=self.cross_attention_dim,
- transformerDimensions=self.text_embedder_hidden_size,
- hidden_size=self.text_embedder_hidden_size,
- intermediate_size=37,
- num_attention_heads=4,
- num_hidden_layers=5,
- vocab_size=1005,
- )
-
- text_encoder = MultilingualCLIP(config)
- text_encoder = text_encoder.eval()
-
- return text_encoder
-
- @property
- def dummy_unet(self):
- torch.manual_seed(0)
-
- model_kwargs = {
- "in_channels": 4,
- # Out channels is double the in channels because the model predicts both mean and variance
- "out_channels": 8,
- "addition_embed_type": "text_image",
- "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
- "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
- "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
- "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
- "layers_per_block": 1,
- "encoder_hid_dim": self.text_embedder_hidden_size,
- "encoder_hid_dim_type": "text_image_proj",
- "cross_attention_dim": self.cross_attention_dim,
- "attention_head_dim": 4,
- "resnet_time_scale_shift": "scale_shift",
- "class_embed_type": None,
- }
-
- model = UNet2DConditionModel(**model_kwargs)
- return model
-
- @property
- def dummy_movq_kwargs(self):
- return {
- "block_out_channels": [32, 64],
- "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
- "in_channels": 3,
- "latent_channels": 4,
- "layers_per_block": 1,
- "norm_num_groups": 8,
- "norm_type": "spatial",
- "num_vq_embeddings": 12,
- "out_channels": 3,
- "up_block_types": [
- "AttnUpDecoderBlock2D",
- "UpDecoderBlock2D",
- ],
- "vq_embed_dim": 4,
- }
-
- @property
- def dummy_movq(self):
- torch.manual_seed(0)
- model = VQModel(**self.dummy_movq_kwargs)
- return model
-
- def get_dummy_components(self):
- text_encoder = self.dummy_text_encoder
- tokenizer = self.dummy_tokenizer
- unet = self.dummy_unet
- movq = self.dummy_movq
-
- scheduler = DDIMScheduler(
- num_train_timesteps=1000,
- beta_schedule="linear",
- beta_start=0.00085,
- beta_end=0.012,
- clip_sample=False,
- set_alpha_to_one=False,
- steps_offset=1,
- prediction_type="epsilon",
- thresholding=False,
- )
-
- components = {
- "text_encoder": text_encoder,
- "tokenizer": tokenizer,
- "unet": unet,
- "scheduler": scheduler,
- "movq": movq,
- }
- return components
-
- def get_dummy_inputs(self, device, seed=0):
- image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
- negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
- if str(device).startswith("mps"):
- generator = torch.manual_seed(seed)
- else:
- generator = torch.Generator(device=device).manual_seed(seed)
- inputs = {
- "prompt": "horse",
- "image_embeds": image_embeds,
- "negative_image_embeds": negative_image_embeds,
- "generator": generator,
- "height": 64,
- "width": 64,
- "guidance_scale": 4.0,
- "num_inference_steps": 2,
- "output_type": "np",
- }
- return inputs
-
-
-class KandinskyPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
- pipeline_class = KandinskyPipeline
- params = [
- "prompt",
- "image_embeds",
- "negative_image_embeds",
- ]
- batch_params = ["prompt", "negative_prompt", "image_embeds", "negative_image_embeds"]
- required_optional_params = [
- "generator",
- "height",
- "width",
- "latents",
- "guidance_scale",
- "negative_prompt",
- "num_inference_steps",
- "return_dict",
- "num_images_per_prompt",
- "output_type",
- ]
- test_xformers_attention = False
-
- def get_dummy_components(self):
- dummy = Dummies()
- return dummy.get_dummy_components()
-
- def get_dummy_inputs(self, device, seed=0):
- dummy = Dummies()
- return dummy.get_dummy_inputs(device=device, seed=seed)
-
- def test_kandinsky(self):
- device = "cpu"
-
- components = self.get_dummy_components()
-
- pipe = self.pipeline_class(**components)
- pipe = pipe.to(device)
-
- pipe.set_progress_bar_config(disable=None)
-
- output = pipe(**self.get_dummy_inputs(device))
- image = output.images
-
- image_from_tuple = pipe(
- **self.get_dummy_inputs(device),
- return_dict=False,
- )[0]
-
- image_slice = image[0, -3:, -3:, -1]
- image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
-
- assert image.shape == (1, 64, 64, 3)
-
- expected_slice = np.array([1.0000, 1.0000, 0.2766, 1.0000, 0.5447, 0.1737, 1.0000, 0.4316, 0.9024])
-
- assert (
- np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
- ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
- assert (
- np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
- ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
-
- @require_torch_gpu
- def test_offloads(self):
- pipes = []
- components = self.get_dummy_components()
- sd_pipe = self.pipeline_class(**components).to(torch_device)
- pipes.append(sd_pipe)
-
- components = self.get_dummy_components()
- sd_pipe = self.pipeline_class(**components)
- sd_pipe.enable_model_cpu_offload()
- pipes.append(sd_pipe)
-
- components = self.get_dummy_components()
- sd_pipe = self.pipeline_class(**components)
- sd_pipe.enable_sequential_cpu_offload()
- pipes.append(sd_pipe)
-
- image_slices = []
- for pipe in pipes:
- inputs = self.get_dummy_inputs(torch_device)
- image = pipe(**inputs).images
-
- image_slices.append(image[0, -3:, -3:, -1].flatten())
-
- assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
- assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
-
-
-@slow
-@require_torch_gpu
-class KandinskyPipelineIntegrationTests(unittest.TestCase):
- def tearDown(self):
- # clean up the VRAM after each test
- super().tearDown()
- gc.collect()
- torch.cuda.empty_cache()
-
- def test_kandinsky_text2img(self):
- expected_image = load_numpy(
- "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
- "/kandinsky/kandinsky_text2img_cat_fp16.npy"
- )
-
- pipe_prior = KandinskyPriorPipeline.from_pretrained(
- "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
- )
- pipe_prior.to(torch_device)
-
- pipeline = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
- pipeline = pipeline.to(torch_device)
- pipeline.set_progress_bar_config(disable=None)
-
- prompt = "red cat, 4k photo"
-
- generator = torch.Generator(device="cuda").manual_seed(0)
- image_emb, zero_image_emb = pipe_prior(
- prompt,
- generator=generator,
- num_inference_steps=5,
- negative_prompt="",
- ).to_tuple()
-
- generator = torch.Generator(device="cuda").manual_seed(0)
- output = pipeline(
- prompt,
- image_embeds=image_emb,
- negative_image_embeds=zero_image_emb,
- generator=generator,
- num_inference_steps=100,
- output_type="np",
- )
-
- image = output.images[0]
-
- assert image.shape == (512, 512, 3)
-
- assert_mean_pixel_difference(image, expected_image)
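The integration test above also doubles as a usage recipe; here is a trimmed sketch of the same two-stage prior + decoder flow (model ids as in the test, step count and output path hypothetical):

import torch
from diffusers import KandinskyPipeline, KandinskyPriorPipeline

# Stage 1: prior maps the prompt to image embeddings.
prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
# Stage 2: decoder turns the embeddings into an image.
decoder = KandinskyPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
).to("cuda")

prompt = "red cat, 4k photo"
image_emb, zero_image_emb = prior(prompt, negative_prompt="").to_tuple()
image = decoder(
    prompt,
    image_embeds=image_emb,
    negative_image_embeds=zero_image_emb,
    num_inference_steps=50,
    output_type="pil",
).images[0]
image.save("kandinsky_cat.png")  # hypothetical output path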
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py
deleted file mode 100644
index 61b9751057f10f2173b8e7edde12cca53ebbd2d0..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py
+++ /dev/null
@@ -1,19 +0,0 @@
-_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
-model = dict(
- bbox_head=dict(
- loss_cls=dict(
- _delete_=True,
- type='GHMC',
- bins=30,
- momentum=0.75,
- use_sigmoid=True,
- loss_weight=1.0),
- loss_bbox=dict(
- _delete_=True,
- type='GHMR',
- mu=0.02,
- bins=10,
- momentum=0.7,
- loss_weight=10.0)))
-optimizer_config = dict(
- _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
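For context, the `_delete_=True` markers above tell mmcv's config machinery to replace the inherited RetinaNet loss settings rather than merge into them. A minimal sketch of loading the resolved config (path hypothetical, assumes the pre-2.0 `mmcv` Config API used by this codebase):

from mmcv import Config

# Hypothetical local path to the config file shown above.
cfg = Config.fromfile("configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py")
print(cfg.model.bbox_head.loss_cls)    # resolved GHMC loss dict
print(cfg.optimizer_config.grad_clip)  # dict(max_norm=35, norm_type=2)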
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py
deleted file mode 100644
index 4aa00ece55280697fc67bd727077a8c9a58cfa44..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py
+++ /dev/null
@@ -1,11 +0,0 @@
-_base_ = ['grid_rcnn_r50_fpn_gn-head_2x_coco.py']
-# learning policy
-lr_config = dict(
- policy='step',
- warmup='linear',
- warmup_iters=500,
- warmup_ratio=0.001,
- step=[8, 11])
-checkpoint_config = dict(interval=1)
-# runtime settings
-runner = dict(type='EpochBasedRunner', max_epochs=12)
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/vfnet_head.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/vfnet_head.py
deleted file mode 100644
index 7243bb62893839568ec51928d88a5ad40b02a66c..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/vfnet_head.py
+++ /dev/null
@@ -1,794 +0,0 @@
-import numpy as np
-import torch
-import torch.nn as nn
-from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
-from mmcv.ops import DeformConv2d
-from mmcv.runner import force_fp32
-
-from mmdet.core import (bbox2distance, bbox_overlaps, build_anchor_generator,
- build_assigner, build_sampler, distance2bbox,
- multi_apply, multiclass_nms, reduce_mean)
-from ..builder import HEADS, build_loss
-from .atss_head import ATSSHead
-from .fcos_head import FCOSHead
-
-INF = 1e8
-
-
-@HEADS.register_module()
-class VFNetHead(ATSSHead, FCOSHead):
- """Head of `VarifocalNet (VFNet): An IoU-aware Dense Object
- Detector.`_.
-
- The VFNet predicts IoU-aware classification scores which mix the
- object presence confidence and object localization accuracy as the
- detection score. It is built on the FCOS architecture and uses ATSS
- for defining positive/negative training examples. The VFNet is trained
- with Varifocal Loss and employs star-shaped deformable convolution to
- extract features for a bbox.
-
- Args:
- num_classes (int): Number of categories excluding the background
- category.
- in_channels (int): Number of channels in the input feature map.
- regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
- level points.
- center_sampling (bool): If true, use center sampling. Default: False.
- center_sample_radius (float): Radius of center sampling. Default: 1.5.
- sync_num_pos (bool): If true, synchronize the number of positive
- examples across GPUs. Default: True
- gradient_mul (float): The multiplier to gradients from bbox refinement
- and recognition. Default: 0.1.
- bbox_norm_type (str): The bbox normalization type, 'reg_denom' or
- 'stride'. Default: reg_denom
- loss_cls_fl (dict): Config of focal loss.
- use_vfl (bool): If true, use varifocal loss for training.
- Default: True.
- loss_cls (dict): Config of varifocal loss.
- loss_bbox (dict): Config of localization loss, GIoU Loss.
- loss_bbox_refine (dict): Config of localization refinement loss, GIoU Loss.
- norm_cfg (dict): dictionary to construct and config norm layer.
- Default: norm_cfg=dict(type='GN', num_groups=32,
- requires_grad=True).
- use_atss (bool): If true, use ATSS to define positive/negative
- examples. Default: True.
- anchor_generator (dict): Config of anchor generator for ATSS.
-
- Example:
- >>> self = VFNetHead(11, 7)
- >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
- >>> cls_score, bbox_pred, bbox_pred_refine= self.forward(feats)
- >>> assert len(cls_score) == len(self.scales)
- """ # noqa: E501
-
- def __init__(self,
- num_classes,
- in_channels,
- regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
- (512, INF)),
- center_sampling=False,
- center_sample_radius=1.5,
- sync_num_pos=True,
- gradient_mul=0.1,
- bbox_norm_type='reg_denom',
- loss_cls_fl=dict(
- type='FocalLoss',
- use_sigmoid=True,
- gamma=2.0,
- alpha=0.25,
- loss_weight=1.0),
- use_vfl=True,
- loss_cls=dict(
- type='VarifocalLoss',
- use_sigmoid=True,
- alpha=0.75,
- gamma=2.0,
- iou_weighted=True,
- loss_weight=1.0),
- loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
- loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0),
- norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
- use_atss=True,
- anchor_generator=dict(
- type='AnchorGenerator',
- ratios=[1.0],
- octave_base_scale=8,
- scales_per_octave=1,
- center_offset=0.0,
- strides=[8, 16, 32, 64, 128]),
- **kwargs):
- # dcn base offsets, adapted from reppoints_head.py
- self.num_dconv_points = 9
- self.dcn_kernel = int(np.sqrt(self.num_dconv_points))
- self.dcn_pad = int((self.dcn_kernel - 1) / 2)
- dcn_base = np.arange(-self.dcn_pad,
- self.dcn_pad + 1).astype(np.float64)
- dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
- dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
- dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
- (-1))
- self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
-
- super(FCOSHead, self).__init__(
- num_classes, in_channels, norm_cfg=norm_cfg, **kwargs)
- self.regress_ranges = regress_ranges
- self.reg_denoms = [
- regress_range[-1] for regress_range in regress_ranges
- ]
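- # the top level's regress range ends at INF, so normalize it with twice
- # the previous level's upper bound instead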
- self.reg_denoms[-1] = self.reg_denoms[-2] * 2
- self.center_sampling = center_sampling
- self.center_sample_radius = center_sample_radius
- self.sync_num_pos = sync_num_pos
- self.bbox_norm_type = bbox_norm_type
- self.gradient_mul = gradient_mul
- self.use_vfl = use_vfl
- if self.use_vfl:
- self.loss_cls = build_loss(loss_cls)
- else:
- self.loss_cls = build_loss(loss_cls_fl)
- self.loss_bbox = build_loss(loss_bbox)
- self.loss_bbox_refine = build_loss(loss_bbox_refine)
-
- # for getting ATSS targets
- self.use_atss = use_atss
- self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
- self.anchor_generator = build_anchor_generator(anchor_generator)
- self.anchor_center_offset = anchor_generator['center_offset']
- self.num_anchors = self.anchor_generator.num_base_anchors[0]
- self.sampling = False
- if self.train_cfg:
- self.assigner = build_assigner(self.train_cfg.assigner)
- sampler_cfg = dict(type='PseudoSampler')
- self.sampler = build_sampler(sampler_cfg, context=self)
-
- def _init_layers(self):
- """Initialize layers of the head."""
- super(FCOSHead, self)._init_cls_convs()
- super(FCOSHead, self)._init_reg_convs()
- self.relu = nn.ReLU(inplace=True)
- self.vfnet_reg_conv = ConvModule(
- self.feat_channels,
- self.feat_channels,
- 3,
- stride=1,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- bias=self.conv_bias)
- self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
- self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
-
- self.vfnet_reg_refine_dconv = DeformConv2d(
- self.feat_channels,
- self.feat_channels,
- self.dcn_kernel,
- 1,
- padding=self.dcn_pad)
- self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
- self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides])
-
- self.vfnet_cls_dconv = DeformConv2d(
- self.feat_channels,
- self.feat_channels,
- self.dcn_kernel,
- 1,
- padding=self.dcn_pad)
- self.vfnet_cls = nn.Conv2d(
- self.feat_channels, self.cls_out_channels, 3, padding=1)
-
- def init_weights(self):
- """Initialize weights of the head."""
- for m in self.cls_convs:
- if isinstance(m.conv, nn.Conv2d):
- normal_init(m.conv, std=0.01)
- for m in self.reg_convs:
- if isinstance(m.conv, nn.Conv2d):
- normal_init(m.conv, std=0.01)
- normal_init(self.vfnet_reg_conv.conv, std=0.01)
- normal_init(self.vfnet_reg, std=0.01)
- normal_init(self.vfnet_reg_refine_dconv, std=0.01)
- normal_init(self.vfnet_reg_refine, std=0.01)
- normal_init(self.vfnet_cls_dconv, std=0.01)
- bias_cls = bias_init_with_prob(0.01)
- normal_init(self.vfnet_cls, std=0.01, bias=bias_cls)
-
- def forward(self, feats):
- """Forward features from the upstream network.
-
- Args:
- feats (tuple[Tensor]): Features from the upstream network, each is
- a 4D-tensor.
-
- Returns:
- tuple:
- cls_scores (list[Tensor]): Box iou-aware scores for each scale
- level, each is a 4D-tensor, the channel number is
- num_points * num_classes.
- bbox_preds (list[Tensor]): Box offsets for each
- scale level, each is a 4D-tensor, the channel number is
- num_points * 4.
- bbox_preds_refine (list[Tensor]): Refined Box offsets for
- each scale level, each is a 4D-tensor, the channel
- number is num_points * 4.
- """
- return multi_apply(self.forward_single, feats, self.scales,
- self.scales_refine, self.strides, self.reg_denoms)
-
- def forward_single(self, x, scale, scale_refine, stride, reg_denom):
- """Forward features of a single scale level.
-
- Args:
- x (Tensor): FPN feature maps of the specified stride.
- scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
- the bbox prediction.
- scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to
- resize the refined bbox prediction.
- stride (int): The corresponding stride for feature maps,
- used to normalize the bbox prediction when
- bbox_norm_type = 'stride'.
- reg_denom (int): The corresponding regression range for feature
- maps, only used to normalize the bbox prediction when
- bbox_norm_type = 'reg_denom'.
-
- Returns:
- tuple: iou-aware cls scores for each box, bbox predictions and
- refined bbox predictions of input feature maps.
- """
- cls_feat = x
- reg_feat = x
-
- for cls_layer in self.cls_convs:
- cls_feat = cls_layer(cls_feat)
-
- for reg_layer in self.reg_convs:
- reg_feat = reg_layer(reg_feat)
-
- # predict the bbox_pred of different level
- reg_feat_init = self.vfnet_reg_conv(reg_feat)
- if self.bbox_norm_type == 'reg_denom':
- bbox_pred = scale(
- self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom
- elif self.bbox_norm_type == 'stride':
- bbox_pred = scale(
- self.vfnet_reg(reg_feat_init)).float().exp() * stride
- else:
- raise NotImplementedError
-
- # compute star deformable convolution offsets
- # converting dcn_offset to reg_feat.dtype thus VFNet can be
- # trained with FP16
- dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul,
- stride).to(reg_feat.dtype)
-
- # refine the bbox_pred
- reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset))
- bbox_pred_refine = scale_refine(
- self.vfnet_reg_refine(reg_feat)).float().exp()
- bbox_pred_refine = bbox_pred_refine * bbox_pred.detach()
-
- # predict the iou-aware cls score
- cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset))
- cls_score = self.vfnet_cls(cls_feat)
-
- return cls_score, bbox_pred, bbox_pred_refine
-
- def star_dcn_offset(self, bbox_pred, gradient_mul, stride):
- """Compute the star deformable conv offsets.
-
- Args:
- bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, b).
- gradient_mul (float): Gradient multiplier.
- stride (int): The corresponding stride for feature maps,
- used to project the bbox onto the feature map.
-
- Returns:
- dcn_offsets (Tensor): The offsets for deformable convolution.
- """
- dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred)
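- # let only a gradient_mul fraction of the gradient flow back into
- # bbox_pred through the dcn offsets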
- bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \
- gradient_mul * bbox_pred
- # map to the feature map scale
- bbox_pred_grad_mul = bbox_pred_grad_mul / stride
- N, C, H, W = bbox_pred.size()
-
- x1 = bbox_pred_grad_mul[:, 0, :, :]
- y1 = bbox_pred_grad_mul[:, 1, :, :]
- x2 = bbox_pred_grad_mul[:, 2, :, :]
- y2 = bbox_pred_grad_mul[:, 3, :, :]
- bbox_pred_grad_mul_offset = bbox_pred.new_zeros(
- N, 2 * self.num_dconv_points, H, W)
- bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1 # -y1
- bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1 # -x1
- bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1 # -y1
- bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1 # -y1
- bbox_pred_grad_mul_offset[:, 5, :, :] = x2 # x2
- bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1 # -x1
- bbox_pred_grad_mul_offset[:, 11, :, :] = x2 # x2
- bbox_pred_grad_mul_offset[:, 12, :, :] = y2 # y2
- bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1 # -x1
- bbox_pred_grad_mul_offset[:, 14, :, :] = y2 # y2
- bbox_pred_grad_mul_offset[:, 16, :, :] = y2 # y2
- bbox_pred_grad_mul_offset[:, 17, :, :] = x2 # x2
- dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset
-
- return dcn_offset
-
- @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))
- def loss(self,
- cls_scores,
- bbox_preds,
- bbox_preds_refine,
- gt_bboxes,
- gt_labels,
- img_metas,
- gt_bboxes_ignore=None):
- """Compute loss of the head.
-
- Args:
- cls_scores (list[Tensor]): Box iou-aware scores for each scale
- level, each is a 4D-tensor, the channel number is
- num_points * num_classes.
- bbox_preds (list[Tensor]): Box offsets for each
- scale level, each is a 4D-tensor, the channel number is
- num_points * 4.
- bbox_preds_refine (list[Tensor]): Refined Box offsets for
- each scale level, each is a 4D-tensor, the channel
- number is num_points * 4.
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
- shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
- gt_labels (list[Tensor]): class indices corresponding to each box
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- gt_bboxes_ignore (None | list[Tensor]): specify which bounding
- boxes can be ignored when computing the loss.
- Default: None.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """
- assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
- all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
- bbox_preds[0].device)
- labels, label_weights, bbox_targets, bbox_weights = self.get_targets(
- cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas,
- gt_bboxes_ignore)
-
- num_imgs = cls_scores[0].size(0)
- # flatten cls_scores, bbox_preds and bbox_preds_refine
- flatten_cls_scores = [
- cls_score.permute(0, 2, 3,
- 1).reshape(-1,
- self.cls_out_channels).contiguous()
- for cls_score in cls_scores
- ]
- flatten_bbox_preds = [
- bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
- for bbox_pred in bbox_preds
- ]
- flatten_bbox_preds_refine = [
- bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
- for bbox_pred_refine in bbox_preds_refine
- ]
- flatten_cls_scores = torch.cat(flatten_cls_scores)
- flatten_bbox_preds = torch.cat(flatten_bbox_preds)
- flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine)
- flatten_labels = torch.cat(labels)
- flatten_bbox_targets = torch.cat(bbox_targets)
- # repeat points to align with bbox_preds
- flatten_points = torch.cat(
- [points.repeat(num_imgs, 1) for points in all_level_points])
-
- # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
- bg_class_ind = self.num_classes
- pos_inds = torch.where(
- ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0]
- num_pos = len(pos_inds)
-
- pos_bbox_preds = flatten_bbox_preds[pos_inds]
- pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds]
- pos_labels = flatten_labels[pos_inds]
-
- # sync num_pos across all gpus
- if self.sync_num_pos:
- num_pos_avg_per_gpu = reduce_mean(
- pos_inds.new_tensor(num_pos).float()).item()
- num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0)
- else:
- num_pos_avg_per_gpu = num_pos
-
- if num_pos > 0:
- pos_bbox_targets = flatten_bbox_targets[pos_inds]
- pos_points = flatten_points[pos_inds]
-
- pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
- pos_decoded_target_preds = distance2bbox(pos_points,
- pos_bbox_targets)
- iou_targets_ini = bbox_overlaps(
- pos_decoded_bbox_preds,
- pos_decoded_target_preds.detach(),
- is_aligned=True).clamp(min=1e-6)
- bbox_weights_ini = iou_targets_ini.clone().detach()
- iou_targets_ini_avg_per_gpu = reduce_mean(
- bbox_weights_ini.sum()).item()
- bbox_avg_factor_ini = max(iou_targets_ini_avg_per_gpu, 1.0)
- loss_bbox = self.loss_bbox(
- pos_decoded_bbox_preds,
- pos_decoded_target_preds.detach(),
- weight=bbox_weights_ini,
- avg_factor=bbox_avg_factor_ini)
-
- pos_decoded_bbox_preds_refine = \
- distance2bbox(pos_points, pos_bbox_preds_refine)
- iou_targets_rf = bbox_overlaps(
- pos_decoded_bbox_preds_refine,
- pos_decoded_target_preds.detach(),
- is_aligned=True).clamp(min=1e-6)
- bbox_weights_rf = iou_targets_rf.clone().detach()
- iou_targets_rf_avg_per_gpu = reduce_mean(
- bbox_weights_rf.sum()).item()
- bbox_avg_factor_rf = max(iou_targets_rf_avg_per_gpu, 1.0)
- loss_bbox_refine = self.loss_bbox_refine(
- pos_decoded_bbox_preds_refine,
- pos_decoded_target_preds.detach(),
- weight=bbox_weights_rf,
- avg_factor=bbox_avg_factor_rf)
-
- # build IoU-aware cls_score targets
- if self.use_vfl:
- pos_ious = iou_targets_rf.clone().detach()
- cls_iou_targets = torch.zeros_like(flatten_cls_scores)
- cls_iou_targets[pos_inds, pos_labels] = pos_ious
- else:
- loss_bbox = pos_bbox_preds.sum() * 0
- loss_bbox_refine = pos_bbox_preds_refine.sum() * 0
- if self.use_vfl:
- cls_iou_targets = torch.zeros_like(flatten_cls_scores)
-
- if self.use_vfl:
- loss_cls = self.loss_cls(
- flatten_cls_scores,
- cls_iou_targets,
- avg_factor=num_pos_avg_per_gpu)
- else:
- loss_cls = self.loss_cls(
- flatten_cls_scores,
- flatten_labels,
- weight=label_weights,
- avg_factor=num_pos_avg_per_gpu)
-
- return dict(
- loss_cls=loss_cls,
- loss_bbox=loss_bbox,
- loss_bbox_rf=loss_bbox_refine)
-
- @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))
- def get_bboxes(self,
- cls_scores,
- bbox_preds,
- bbox_preds_refine,
- img_metas,
- cfg=None,
- rescale=None,
- with_nms=True):
- """Transform network outputs for a batch into bbox predictions.
-
- Args:
- cls_scores (list[Tensor]): Box iou-aware scores for each scale
- level with shape (N, num_points * num_classes, H, W).
- bbox_preds (list[Tensor]): Box offsets for each scale
- level with shape (N, num_points * 4, H, W).
- bbox_preds_refine (list[Tensor]): Refined Box offsets for
- each scale level with shape (N, num_points * 4, H, W).
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- cfg (mmcv.Config): Test / postprocessing configuration,
- if None, test_cfg would be used. Default: None.
- rescale (bool): If True, return boxes in original image space.
- Default: False.
- with_nms (bool): If True, do nms before returning boxes.
- Default: True.
-
- Returns:
- list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
- The first item is an (n, 5) tensor, where the first 4 columns
- are bounding box positions (tl_x, tl_y, br_x, br_y) and the
- 5-th column is a score between 0 and 1. The second item is a
- (n,) tensor where each item is the predicted class label of
- the corresponding box.
- """
- assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
- num_levels = len(cls_scores)
-
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
- mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
- bbox_preds[0].device)
- result_list = []
- for img_id in range(len(img_metas)):
- cls_score_list = [
- cls_scores[i][img_id].detach() for i in range(num_levels)
- ]
- bbox_pred_list = [
- bbox_preds_refine[i][img_id].detach()
- for i in range(num_levels)
- ]
- img_shape = img_metas[img_id]['img_shape']
- scale_factor = img_metas[img_id]['scale_factor']
- det_bboxes = self._get_bboxes_single(cls_score_list,
- bbox_pred_list, mlvl_points,
- img_shape, scale_factor, cfg,
- rescale, with_nms)
- result_list.append(det_bboxes)
- return result_list
-
- def _get_bboxes_single(self,
- cls_scores,
- bbox_preds,
- mlvl_points,
- img_shape,
- scale_factor,
- cfg,
- rescale=False,
- with_nms=True):
- """Transform outputs for a single batch item into bbox predictions.
-
- Args:
- cls_scores (list[Tensor]): Box iou-aware scores for a single scale
- level with shape (num_points * num_classes, H, W).
- bbox_preds (list[Tensor]): Box offsets for a single scale
- level with shape (num_points * 4, H, W).
- mlvl_points (list[Tensor]): Box reference for a single scale level
- with shape (num_total_points, 4).
- img_shape (tuple[int]): Shape of the input image,
- (height, width, 3).
- scale_factor (ndarray): Scale factor of the image arrange as
- (w_scale, h_scale, w_scale, h_scale).
- cfg (mmcv.Config | None): Test / postprocessing configuration,
- if None, test_cfg would be used.
- rescale (bool): If True, return boxes in original image space.
- Default: False.
- with_nms (bool): If True, do nms before returning boxes.
- Default: True.
-
- Returns:
- tuple(Tensor):
- det_bboxes (Tensor): BBox predictions in shape (n, 5), where
- the first 4 columns are bounding box positions
- (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
- between 0 and 1.
- det_labels (Tensor): A (n,) tensor where each item is the
- predicted class label of the corresponding box.
- """
- cfg = self.test_cfg if cfg is None else cfg
- assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
- mlvl_bboxes = []
- mlvl_scores = []
- for cls_score, bbox_pred, points in zip(cls_scores, bbox_preds,
- mlvl_points):
- assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
- scores = cls_score.permute(1, 2, 0).reshape(
- -1, self.cls_out_channels).contiguous().sigmoid()
- bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).contiguous()
-
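- # keep only the top-nms_pre scoring candidates per level before NMS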
- nms_pre = cfg.get('nms_pre', -1)
- if 0 < nms_pre < scores.shape[0]:
- max_scores, _ = scores.max(dim=1)
- _, topk_inds = max_scores.topk(nms_pre)
- points = points[topk_inds, :]
- bbox_pred = bbox_pred[topk_inds, :]
- scores = scores[topk_inds, :]
- bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
- mlvl_bboxes.append(bboxes)
- mlvl_scores.append(scores)
- mlvl_bboxes = torch.cat(mlvl_bboxes)
- if rescale:
- mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
- mlvl_scores = torch.cat(mlvl_scores)
- padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
- # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
- # BG cat_id: num_class
- mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
- if with_nms:
- det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
- cfg.score_thr, cfg.nms,
- cfg.max_per_img)
- return det_bboxes, det_labels
- else:
- return mlvl_bboxes, mlvl_scores
-
- def _get_points_single(self,
- featmap_size,
- stride,
- dtype,
- device,
- flatten=False):
- """Get points according to feature map sizes."""
- h, w = featmap_size
- x_range = torch.arange(
- 0, w * stride, stride, dtype=dtype, device=device)
- y_range = torch.arange(
- 0, h * stride, stride, dtype=dtype, device=device)
- y, x = torch.meshgrid(y_range, x_range)
- # to be compatible with anchor points in ATSS
- if self.use_atss:
- points = torch.stack(
- (x.reshape(-1), y.reshape(-1)), dim=-1) + \
- stride * self.anchor_center_offset
- else:
- points = torch.stack(
- (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
- return points
-
- def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels,
- img_metas, gt_bboxes_ignore):
- """A wrapper for computing ATSS and FCOS targets for points in multiple
- images.
-
- Args:
- cls_scores (list[Tensor]): Box iou-aware scores for each scale
- level with shape (N, num_points * num_classes, H, W).
- mlvl_points (list[Tensor]): Points of each fpn level, each has
- shape (num_points, 2).
- gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
- each has shape (num_gt, 4).
- gt_labels (list[Tensor]): Ground truth labels of each box,
- each has shape (num_gt,).
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be
- ignored, shape (num_ignored_gts, 4).
-
- Returns:
- tuple:
- labels_list (list[Tensor]): Labels of each level.
- label_weights (Tensor/None): Label weights of all levels.
- bbox_targets_list (list[Tensor]): Regression targets of each
- level, (l, t, r, b).
- bbox_weights (Tensor/None): Bbox weights of all levels.
- """
- if self.use_atss:
- return self.get_atss_targets(cls_scores, mlvl_points, gt_bboxes,
- gt_labels, img_metas,
- gt_bboxes_ignore)
- else:
- self.norm_on_bbox = False
- return self.get_fcos_targets(mlvl_points, gt_bboxes, gt_labels)
-
- def _get_target_single(self, *args, **kwargs):
- """Avoid ambiguity in multiple inheritance."""
- if self.use_atss:
- return ATSSHead._get_target_single(self, *args, **kwargs)
- else:
- return FCOSHead._get_target_single(self, *args, **kwargs)
-
- def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list):
- """Compute FCOS regression and classification targets for points in
- multiple images.
-
- Args:
- points (list[Tensor]): Points of each fpn level, each has shape
- (num_points, 2).
- gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
- each has shape (num_gt, 4).
- gt_labels_list (list[Tensor]): Ground truth labels of each box,
- each has shape (num_gt,).
-
- Returns:
- tuple:
- labels (list[Tensor]): Labels of each level.
- label_weights: None, to be compatible with ATSS targets.
- bbox_targets (list[Tensor]): BBox targets of each level.
- bbox_weights: None, to be compatible with ATSS targets.
- """
- labels, bbox_targets = FCOSHead.get_targets(self, points,
- gt_bboxes_list,
- gt_labels_list)
- label_weights = None
- bbox_weights = None
- return labels, label_weights, bbox_targets, bbox_weights
-
- def get_atss_targets(self,
- cls_scores,
- mlvl_points,
- gt_bboxes,
- gt_labels,
- img_metas,
- gt_bboxes_ignore=None):
- """A wrapper for computing ATSS targets for points in multiple images.
-
- Args:
- cls_scores (list[Tensor]): Box iou-aware scores for each scale
- level with shape (N, num_points * num_classes, H, W).
- mlvl_points (list[Tensor]): Points of each fpn level, each has
- shape (num_points, 2).
- gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
- each has shape (num_gt, 4).
- gt_labels (list[Tensor]): Ground truth labels of each box,
- each has shape (num_gt,).
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be
- ignored, shape (num_ignored_gts, 4). Default: None.
-
- Returns:
- tuple:
- labels_list (list[Tensor]): Labels of each level.
- label_weights (Tensor): Label weights of all levels.
- bbox_targets_list (list[Tensor]): Regression targets of each
- level, (l, t, r, b).
- bbox_weights (Tensor): Bbox weights of all levels.
- """
- featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
- assert len(featmap_sizes) == self.anchor_generator.num_levels
-
- device = cls_scores[0].device
- anchor_list, valid_flag_list = self.get_anchors(
- featmap_sizes, img_metas, device=device)
- label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
-
- cls_reg_targets = ATSSHead.get_targets(
- self,
- anchor_list,
- valid_flag_list,
- gt_bboxes,
- img_metas,
- gt_bboxes_ignore_list=gt_bboxes_ignore,
- gt_labels_list=gt_labels,
- label_channels=label_channels,
- unmap_outputs=True)
- if cls_reg_targets is None:
- return None
-
- (anchor_list, labels_list, label_weights_list, bbox_targets_list,
- bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
-
- bbox_targets_list = [
- bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list
- ]
-
- num_imgs = len(img_metas)
- # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format
- bbox_targets_list = self.transform_bbox_targets(
- bbox_targets_list, mlvl_points, num_imgs)
-
- labels_list = [labels.reshape(-1) for labels in labels_list]
- label_weights_list = [
- label_weights.reshape(-1) for label_weights in label_weights_list
- ]
- bbox_weights_list = [
- bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list
- ]
- label_weights = torch.cat(label_weights_list)
- bbox_weights = torch.cat(bbox_weights_list)
- return labels_list, label_weights, bbox_targets_list, bbox_weights
-
- def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs):
- """Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format.
-
- Args:
- decoded_bboxes (list[Tensor]): Regression targets of each level,
- in the form of (x1, y1, x2, y2).
- mlvl_points (list[Tensor]): Points of each fpn level, each has
- shape (num_points, 2).
- num_imgs (int): the number of images in a batch.
-
- Returns:
- bbox_targets (list[Tensor]): Regression targets of each level in
- the form of (l, t, r, b).
- """
- # TODO: Re-implemented in Class PointCoder
- assert len(decoded_bboxes) == len(mlvl_points)
- num_levels = len(decoded_bboxes)
- mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points]
- bbox_targets = []
- for i in range(num_levels):
- bbox_target = bbox2distance(mlvl_points[i], decoded_bboxes[i])
- bbox_targets.append(bbox_target)
-
- return bbox_targets
-
- def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
- missing_keys, unexpected_keys, error_msgs):
- """Override the method in the parent class to avoid changing para's
- name."""
- pass
diff --git a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/transformer_ops/position_embedding.py b/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/transformer_ops/position_embedding.py
deleted file mode 100644
index 28e6e9166c02277dd398a68663d2e9b65d4ff4d1..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/transformer_ops/position_embedding.py
+++ /dev/null
@@ -1,82 +0,0 @@
-import torch
-import torch.nn as nn
-import math
-
-
-######################################################################################
-# position embedding
-######################################################################################
-class PositionEmbeddingLearned(nn.Module):
- """
- This is a learned version of the position embedding
- """
- def __init__(self, num_pos_feats=256):
- super().__init__()
- self.row_embed = nn.Embedding(32, num_pos_feats)
- self.col_embed = nn.Embedding(32, num_pos_feats)
- self.reset_parameters()
-
- def reset_parameters(self):
- nn.init.uniform_(self.row_embed.weight)
- nn.init.uniform_(self.col_embed.weight)
-
- def forward(self, x, mask):
- h, w = x.shape[-2:]
- i = torch.arange(w, device=x.device)
- j = torch.arange(h, device=x.device)
- x_emb = self.col_embed(i).unsqueeze(0).repeat(h, 1, 1)
- y_emb = self.row_embed(j).unsqueeze(1).repeat(1, w, 1)
- pos = (x_emb + y_emb).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
- return pos
-
-
-class PositionEmbeddingSine(nn.Module):
- """
- This is a standard version of the position embedding, very similar to the one used by the
- "Attention is all you need" paper, generalized to work on examples
- """
- def __init__(self, feats_dim=512, temperature=10000, normalize=False, scale=None):
- """
- explicitly encode the position using the sinusoid:
- PE(pos,2i) = sin(pos/temperature^(2*i/d_model))
- PE(pos,2i+1) = cos(pos/temperature^(2*i/d_model))
- :param feats_dim: the dimension of features, each dimension of the positional embedding to a sinusoid
- :param temperature: wavelengths from a geometric progression from scale
- :param normalize: whether to normalize the position to (0,1)
- :param scale: scale for the position embedding
- """
- super(PositionEmbeddingSine, self).__init__()
- self.feats_dim = feats_dim
- self.T = temperature
- self.norm = normalize
- if scale is None:
- scale = 2 * math.pi
- self.scale = scale
-
- def forward(self, x, mask):
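- # running sums over the valid mask give each position its index along the two spatial axes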
- x_embed = mask.cumsum(1, dtype=torch.float32)
- y_embed = mask.cumsum(2, dtype=torch.float32)
- if self.norm:
- eps = 1e-5
- x_embed = x_embed / (x_embed[:, -1:, :] + eps) * self.scale
- y_embed = y_embed / (y_embed[:, :, -1:] + eps) * self.scale
-
- dim_t = torch.arange(self.feats_dim, dtype=torch.float32, device=x.device)
- dim_t = self.T ** (2*(dim_t//2)/self.feats_dim)
- pos_x = x_embed[:, :, :, None] / dim_t
- pos_y = y_embed[:, :, :, None] / dim_t
-
- pos_x[:, :, :, 0::2], pos_x[:, :, :, 1::2] = pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()
- pos_y[:, :, :, 0::2], pos_y[:, :, :, 1::2] = pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()
- pos = (pos_x + pos_y).permute(0, 3, 1, 2) * 0.5
- return pos
-
-
-def build_position_embed(embed_type='learned', feats_dim=512, temperature=10000):
- if embed_type == 'sine':
- pos_embed = PositionEmbeddingSine(feats_dim, temperature, normalize=True)
- elif embed_type == 'learned':
- pos_embed = PositionEmbeddingLearned(feats_dim)
- else:
- raise ValueError(f"nor supported {embed_type}")
- return pos_embed
diff --git a/spaces/ArkanDash/rvc-models-new/lib/infer_pack/onnx_inference.py b/spaces/ArkanDash/rvc-models-new/lib/infer_pack/onnx_inference.py
deleted file mode 100644
index 6517853be49e61c427cf7cd9b5ed203f6d5f367e..0000000000000000000000000000000000000000
--- a/spaces/ArkanDash/rvc-models-new/lib/infer_pack/onnx_inference.py
+++ /dev/null
@@ -1,145 +0,0 @@
-import onnxruntime
-import librosa
-import numpy as np
-import soundfile
-
-
-class ContentVec:
- def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None):
- print("load model(s) from {}".format(vec_path))
- if device == "cpu" or device is None:
- providers = ["CPUExecutionProvider"]
- elif device == "cuda":
- providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
- elif device == "dml":
- providers = ["DmlExecutionProvider"]
- else:
- raise RuntimeError("Unsportted Device")
- self.model = onnxruntime.InferenceSession(vec_path, providers=providers)
-
- def __call__(self, wav):
- return self.forward(wav)
-
- def forward(self, wav):
- feats = wav
- if feats.ndim == 2: # double channels
- feats = feats.mean(-1)
- assert feats.ndim == 1, feats.ndim
- feats = np.expand_dims(np.expand_dims(feats, 0), 0)
- onnx_input = {self.model.get_inputs()[0].name: feats}
- logits = self.model.run(None, onnx_input)[0]
- return logits.transpose(0, 2, 1)
-
-
-def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs):
- if f0_predictor == "pm":
- from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor
-
- f0_predictor_object = PMF0Predictor(
- hop_length=hop_length, sampling_rate=sampling_rate
- )
- elif f0_predictor == "harvest":
- from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import (
- HarvestF0Predictor,
- )
-
- f0_predictor_object = HarvestF0Predictor(
- hop_length=hop_length, sampling_rate=sampling_rate
- )
- elif f0_predictor == "dio":
- from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor
-
- f0_predictor_object = DioF0Predictor(
- hop_length=hop_length, sampling_rate=sampling_rate
- )
- else:
- raise Exception("Unknown f0 predictor")
- return f0_predictor_object
-
-
-class OnnxRVC:
- def __init__(
- self,
- model_path,
- sr=40000,
- hop_size=512,
- vec_path="vec-768-layer-12",
- device="cpu",
- ):
- vec_path = f"pretrained/{vec_path}.onnx"
- self.vec_model = ContentVec(vec_path, device)
- if device == "cpu" or device is None:
- providers = ["CPUExecutionProvider"]
- elif device == "cuda":
- providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
- elif device == "dml":
- providers = ["DmlExecutionProvider"]
- else:
- raise RuntimeError("Unsportted Device")
- self.model = onnxruntime.InferenceSession(model_path, providers=providers)
- self.sampling_rate = sr
- self.hop_size = hop_size
-
- def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd):
- onnx_input = {
- self.model.get_inputs()[0].name: hubert,
- self.model.get_inputs()[1].name: hubert_length,
- self.model.get_inputs()[2].name: pitch,
- self.model.get_inputs()[3].name: pitchf,
- self.model.get_inputs()[4].name: ds,
- self.model.get_inputs()[5].name: rnd,
- }
- return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16)
-
- def inference(
- self,
- raw_path,
- sid,
- f0_method="dio",
- f0_up_key=0,
- pad_time=0.5,
- cr_threshold=0.02,
- ):
- f0_min = 50
- f0_max = 1100
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
- f0_predictor = get_f0_predictor(
- f0_method,
- hop_length=self.hop_size,
- sampling_rate=self.sampling_rate,
- threshold=cr_threshold,
- )
- wav, sr = librosa.load(raw_path, sr=self.sampling_rate)
- org_length = len(wav)
- if org_length / sr > 50.0:
- raise RuntimeError("Reached Max Length")
-
- wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000)
- wav16k = wav16k
-
- hubert = self.vec_model(wav16k)
- hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32)
- hubert_length = hubert.shape[1]
-
- pitchf = f0_predictor.compute_f0(wav, hubert_length)
- pitchf = pitchf * 2 ** (f0_up_key / 12)
- pitch = pitchf.copy()
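- # map f0 to the mel scale and quantize it into 255 coarse bins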
- f0_mel = 1127 * np.log(1 + pitch / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
- f0_mel_max - f0_mel_min
- ) + 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > 255] = 255
- pitch = np.rint(f0_mel).astype(np.int64)
-
- pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32)
- pitch = pitch.reshape(1, len(pitch))
- ds = np.array([sid]).astype(np.int64)
-
- rnd = np.random.randn(1, 192, hubert_length).astype(np.float32)
- hubert_length = np.array([hubert_length]).astype(np.int64)
-
- out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze()
- out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant")
- return out_wav[0:org_length]
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/measure.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/measure.py
deleted file mode 100644
index a508ffa80bd715b47c190ed9d747dbc388fa5b19..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/measure.py
+++ /dev/null
@@ -1,151 +0,0 @@
-from operator import itemgetter
-from typing import TYPE_CHECKING, Callable, NamedTuple, Optional, Sequence
-
-from . import errors
-from .protocol import is_renderable, rich_cast
-
-if TYPE_CHECKING:
- from .console import Console, ConsoleOptions, RenderableType
-
-
-class Measurement(NamedTuple):
- """Stores the minimum and maximum widths (in characters) required to render an object."""
-
- minimum: int
- """Minimum number of cells required to render."""
- maximum: int
- """Maximum number of cells required to render."""
-
- @property
- def span(self) -> int:
- """Get difference between maximum and minimum."""
- return self.maximum - self.minimum
-
- def normalize(self) -> "Measurement":
- """Get measurement that ensures that minimum <= maximum and minimum >= 0
-
- Returns:
- Measurement: A normalized measurement.
- """
- minimum, maximum = self
- minimum = min(max(0, minimum), maximum)
- return Measurement(max(0, minimum), max(0, max(minimum, maximum)))
-
- def with_maximum(self, width: int) -> "Measurement":
- """Get a RenderableWith where the widths are <= width.
-
- Args:
- width (int): Maximum desired width.
-
- Returns:
- Measurement: New Measurement object.
- """
- minimum, maximum = self
- return Measurement(min(minimum, width), min(maximum, width))
-
- def with_minimum(self, width: int) -> "Measurement":
- """Get a RenderableWith where the widths are >= width.
-
- Args:
- width (int): Minimum desired width.
-
- Returns:
- Measurement: New Measurement object.
- """
- minimum, maximum = self
- width = max(0, width)
- return Measurement(max(minimum, width), max(maximum, width))
-
- def clamp(
- self, min_width: Optional[int] = None, max_width: Optional[int] = None
- ) -> "Measurement":
- """Clamp a measurement within the specified range.
-
- Args:
- min_width (int): Minimum desired width, or ``None`` for no minimum. Defaults to None.
- max_width (int): Maximum desired width, or ``None`` for no maximum. Defaults to None.
-
- Returns:
- Measurement: New Measurement object.
- """
- measurement = self
- if min_width is not None:
- measurement = measurement.with_minimum(min_width)
- if max_width is not None:
- measurement = measurement.with_maximum(max_width)
- return measurement
-
- @classmethod
- def get(
- cls, console: "Console", options: "ConsoleOptions", renderable: "RenderableType"
- ) -> "Measurement":
- """Get a measurement for a renderable.
-
- Args:
- console (~rich.console.Console): Console instance.
- options (~rich.console.ConsoleOptions): Console options.
- renderable (RenderableType): An object that may be rendered with Rich.
-
- Raises:
- errors.NotRenderableError: If the object is not renderable.
-
- Returns:
- Measurement: Measurement object containing range of character widths required to render the object.
- """
- _max_width = options.max_width
- if _max_width < 1:
- return Measurement(0, 0)
- if isinstance(renderable, str):
- renderable = console.render_str(
- renderable, markup=options.markup, highlight=False
- )
- renderable = rich_cast(renderable)
- if is_renderable(renderable):
- get_console_width: Optional[
- Callable[["Console", "ConsoleOptions"], "Measurement"]
- ] = getattr(renderable, "__rich_measure__", None)
- if get_console_width is not None:
- render_width = (
- get_console_width(console, options)
- .normalize()
- .with_maximum(_max_width)
- )
- if render_width.maximum < 1:
- return Measurement(0, 0)
- return render_width.normalize()
- else:
- return Measurement(0, _max_width)
- else:
- raise errors.NotRenderableError(
- f"Unable to get render width for {renderable!r}; "
- "a str, Segment, or object with __rich_console__ method is required"
- )
-
-
-def measure_renderables(
- console: "Console",
- options: "ConsoleOptions",
- renderables: Sequence["RenderableType"],
-) -> "Measurement":
- """Get a measurement that would fit a number of renderables.
-
- Args:
- console (~rich.console.Console): Console instance.
- options (~rich.console.ConsoleOptions): Console options.
- renderables (Iterable[RenderableType]): One or more renderable objects.
-
- Returns:
- Measurement: Measurement object containing range of character widths required to
- contain all given renderables.
- """
- if not renderables:
- return Measurement(0, 0)
- get_measurement = Measurement.get
- measurements = [
- get_measurement(console, options, renderable) for renderable in renderables
- ]
- measured_width = Measurement(
- max(measurements, key=itemgetter(0)).minimum,
- max(measurements, key=itemgetter(1)).maximum,
- )
- return measured_width
diff --git a/spaces/AutoBG/Auto-BoardGame/description_generator.py b/spaces/AutoBG/Auto-BoardGame/description_generator.py
deleted file mode 100644
index a1106fb9150cf3f143185a7610065ce0ea96b924..0000000000000000000000000000000000000000
--- a/spaces/AutoBG/Auto-BoardGame/description_generator.py
+++ /dev/null
@@ -1,119 +0,0 @@
-
-import numpy as np
-import re
-import spacy
-import openai
-from operator import itemgetter
-#user input manager class
-class input_manager:
-
- #initialize key dictionary from vector data frame
- def __init__(self,key_df, slim_df, search_tokens):
- self.key_df = key_df
- self.slim_df = slim_df
- self.search_tokens = search_tokens
- self.key = dict(zip(list(key_df.columns),np.zeros(len(key_df.columns))))
- self.nlp = spacy.load("en_core_web_md")
-
- #translate input text to vector
- def set_input(self,input_cats):
- #need setup to apply correct group tag to values
- #separate known/unknown features
- k_flags = [cat for cat in input_cats if cat in list(self.key.keys())]
- unk_flags = [cat for cat in input_cats if cat not in list(self.key.keys())]
-
- #process within feature class similarity for each unknown input
- if len(unk_flags)>0:
-
- outs = []
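- #match each unknown tag to its nearest known token via spaCy vector similarity, keeping the feature-class prefix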
- for word in unk_flags:
- if re.match(r"game_type_",word):
- tok = self.nlp(word.split("_")[-1])
- mtch = max([(key,key.similarity(tok)) for key in self.search_tokens[0]],key=itemgetter(1))
- #if no known match is found (the model doesn't recognize the input word), discard it - other solutions are performance-prohibitive
- if mtch[1]>0:
- outs.append("game_type_"+mtch[0])
- elif re.match(r"mechanic_",word):
- tok = self.nlp(word.split("_")[-1])
- mtch = max([(key,key.similarity(tok)) for key in self.search_tokens[1]],key=itemgetter(1))
- if mtch[1]>0:
- outs.append("mechanic_"+mtch[0])
- elif re.match(r"category_",word):
- tok = self.nlp(word.split("_")[-1])
- mtch=max([(key,key.similarity(tok)) for key in self.search_tokens[2]],key=itemgetter(1))
- if mtch[1]>0:
- outs.append("category_"+mtch[0])
- elif re.match(r"family_",word):
- tok = self.nlp(word.split("_")[-1])
- mtch=max([(key,key.similarity(tok)) for key in self.search_tokens[3]],key=itemgetter(1))
- if mtch[1]>0:
- outs.append("family_"+str(mtch[0]))
-
- #if unks are processed, rejoin nearest match to known.
- k_flags = list(set(k_flags+outs))
-
- #preserve global key and output a copy w/ input keys activated to 1
- d = self.key.copy()
- for cat in k_flags:
- d[cat] = 1.0
-
- # DELETE ME
- return d
-
- def input_parser(self,in_vec):
- #extracting keys from processed vector
- ks = [k for k,v in in_vec.items() if v == 1]
-
- return ks
-
-class model_control:
- def __init__(self, apikey, model_id):
- self.api_key = apikey
- openai.api_key = self.api_key
-
- self.prompt = None
-
- self.model = openai.FineTune.retrieve(id=model_id).fine_tuned_model
-
- def prompt_formatter(self,ks):
- self.prompt = ". ".join(ks) + "\n\n###\n\n"
-
-
-
- def call_api(self,status=0):
- if status == 0:
- temp=0.5
- pres=0.7
- elif status == 1:
- temp=0.4
- pres=0.6
- elif status == 2:
- temp=0.5
- pres=0.8
-
- answer = openai.Completion.create(
- model=self.model,
- prompt=self.prompt,
- max_tokens=512,
- temperature=temp,
- stop=["END"],
- presence_penalty=pres,
- frequency_penalty=0.5
- )
- return answer['choices'][0]['text']
-
- def resp_cleanup(self,text):
-
- if ((text[-1] != "!") & (text[-1] != ".") & (text[-1] != "?")):
- text = " ".join([e+'.' for e in text.split('.')[0:-1] if e])
-
- sent = re.split(r'([.?!:])', text)
- phrases = ["[Dd]esigned by","[Dd]esigner of","[Aa]rt by","[Aa]rtist of","[Pp]ublished","[Pp]ublisher of"]
-
- pat = re.compile("(?:" + "|".join(phrases) + ")")
- fix = re.compile("(?<=[.!?])[.!?]")
-
- text = re.sub(fix,'',''.join([s for s in sent if pat.search(s) == None]))
-
-
- return text
diff --git a/spaces/AutoLLM/AutoAgents/autoagents/utils/logger.py b/spaces/AutoLLM/AutoAgents/autoagents/utils/logger.py
deleted file mode 100644
index ea6f6f9a894db245368bdaecb90faa266547a82e..0000000000000000000000000000000000000000
--- a/spaces/AutoLLM/AutoAgents/autoagents/utils/logger.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import os
-import json
-from typing import Dict, Any
-import uuid
-from datetime import datetime
-import pytz
-
-import huggingface_hub
-from huggingface_hub import Repository
-
-
-class InteractionsLogger:
- def __init__(self, name: str, persist=False):
- self.persist = persist
- self.counter = 0
- self.name = name # unique id
- HF_TOKEN = os.environ.get("HF_TOKEN")
- HF_DATASET_REPO_URL = os.environ.get("HF_DATASET_REPO_URL")
- if (HF_TOKEN is not None) and (HF_DATASET_REPO_URL is not None):
- self.repo = Repository(
- local_dir="data", clone_from=HF_DATASET_REPO_URL, use_auth_token=HF_TOKEN
- )
- else:
- self.persist = False
-
- def set_goal(self, goal: str):
- # Initialize two variables for saving two files (self.messages for
- # training and self.structure_data for later use)
- self.messages = [{"goal": goal}]
- self.structured_data = {"goal": goal}
-
- def add_system(self, more: Dict):
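- # "|" dict union (Python 3.9+) merges the extra fields into the system message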
- self.convos = [{"from": "system"} | more]
-
- def add_ai(self, msg: str):
- self.convos.append({"from": "ai", "value": msg})
- self.messages.append({"id": f"{self.name}_{self.counter}", "conversations": self.convos})
- self.counter += 1
-
- def add_structured_data(self, data: Dict[str, Any]):
- self.structured_data.update({f"turn_{self.counter}": data})
-
- def add_message(self, data: Dict[str, Any]):
- self.structured_data.update(data)
-
- def save(self):
- # add current datetime
- self.add_message({"datetime": datetime.now(pytz.utc).strftime("%m/%d/%Y %H:%M:%S %Z%z")})
- if self.persist:
- # TODO: want to add retry in a loop?
- self.repo.git_pull()
- fname = uuid.uuid4().hex[:16]
- with open(f"./data/{fname}.json", "w") as f:
- json.dump(self.messages, f, indent=2)
- with open(f"./data/{fname}.clean.json", "w") as f:
- json.dump(self.structured_data, f, indent=2)
- commit_url = self.repo.push_to_hub()
-
- def add_cost(self, cost):
- self.messages.append({"metrics": cost})
\ No newline at end of file
diff --git a/spaces/Awesimo/jojogan/e4e/scripts/calc_losses_on_images.py b/spaces/Awesimo/jojogan/e4e/scripts/calc_losses_on_images.py
deleted file mode 100644
index 32b6bcee854da7ae357daf82bd986f30db9fb72c..0000000000000000000000000000000000000000
--- a/spaces/Awesimo/jojogan/e4e/scripts/calc_losses_on_images.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from argparse import ArgumentParser
-import os
-import json
-import sys
-from tqdm import tqdm
-import numpy as np
-import torch
-from torch.utils.data import DataLoader
-import torchvision.transforms as transforms
-
-sys.path.append(".")
-sys.path.append("..")
-
-from criteria.lpips.lpips import LPIPS
-from datasets.gt_res_dataset import GTResDataset
-
-
-def parse_args():
- parser = ArgumentParser(add_help=False)
- parser.add_argument('--mode', type=str, default='lpips', choices=['lpips', 'l2'])
- parser.add_argument('--data_path', type=str, default='results')
- parser.add_argument('--gt_path', type=str, default='gt_images')
- parser.add_argument('--workers', type=int, default=4)
- parser.add_argument('--batch_size', type=int, default=4)
- parser.add_argument('--is_cars', action='store_true')
- args = parser.parse_args()
- return args
-
-
-def run(args):
- resize_dims = (256, 256)
- if args.is_cars:
- resize_dims = (192, 256)
- transform = transforms.Compose([transforms.Resize(resize_dims),
- transforms.ToTensor(),
- transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
-
- print('Loading dataset')
- dataset = GTResDataset(root_path=args.data_path,
- gt_dir=args.gt_path,
- transform=transform)
-
- dataloader = DataLoader(dataset,
- batch_size=args.batch_size,
- shuffle=False,
- num_workers=int(args.workers),
- drop_last=True)
-
- if args.mode == 'lpips':
- loss_func = LPIPS(net_type='alex')
- elif args.mode == 'l2':
- loss_func = torch.nn.MSELoss()
- else:
- raise Exception('Not a valid mode!')
- loss_func.cuda()
-
- global_i = 0
- scores_dict = {}
- all_scores = []
- for result_batch, gt_batch in tqdm(dataloader):
- for i in range(args.batch_size):
- loss = float(loss_func(result_batch[i:i + 1].cuda(), gt_batch[i:i + 1].cuda()))
- all_scores.append(loss)
- im_path = dataset.pairs[global_i][0]
- scores_dict[os.path.basename(im_path)] = loss
- global_i += 1
-
- all_scores = list(scores_dict.values())
- mean = np.mean(all_scores)
- std = np.std(all_scores)
- result_str = 'Average loss is {:.2f}+-{:.2f}'.format(mean, std)
- print('Finished with ', args.data_path)
- print(result_str)
-
- out_path = os.path.join(os.path.dirname(args.data_path), 'inference_metrics')
- if not os.path.exists(out_path):
- os.makedirs(out_path)
-
- with open(os.path.join(out_path, 'stat_{}.txt'.format(args.mode)), 'w') as f:
- f.write(result_str)
- with open(os.path.join(out_path, 'scores_{}.json'.format(args.mode)), 'w') as f:
- json.dump(scores_dict, f)
-
-
-if __name__ == '__main__':
- args = parse_args()
- run(args)
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/data/coco_keypoint.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/data/coco_keypoint.py
deleted file mode 100644
index b4ceb066faf696954244205dc75376b767071217..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/data/coco_keypoint.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from detectron2.data.detection_utils import create_keypoint_hflip_indices
-
-from .coco import dataloader
-
-dataloader.train.dataset.min_keypoints = 1
-dataloader.train.dataset.names = "keypoints_coco_2017_train"
-dataloader.test.dataset.names = "keypoints_coco_2017_val"
-
-dataloader.train.mapper.update(
- use_instance_mask=False,
- use_keypoint=True,
- keypoint_hflip_indices=create_keypoint_hflip_indices(dataloader.train.dataset.names),
-)
diff --git a/spaces/Benson/text-generation/Examples/Blockman Ir Nueva Versin Apk.md b/spaces/Benson/text-generation/Examples/Blockman Ir Nueva Versin Apk.md
deleted file mode 100644
index a975036c9735c8e8eda08d693b9dbf70cf9672bd..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Blockman Ir Nueva Versin Apk.md
+++ /dev/null
@@ -1,71 +0,0 @@
-
-
Blockman GO New Version APK: Everything You Need to Know
-
Are you looking for a fun and exciting app that lets you play different games, chat with friends, and create your own worlds? If so, you may want to take a look at the Blockman GO new version APK. It is a free app that offers a wealth of features and content for players of all ages and preferences. In this article, we will tell you everything you need to know about the Blockman GO new version APK, including what it is, what is new in the latest version, how to download and install it, and why you should play it.
Blockman GO is a free app that lets you play various block-style minigames, chat with other players, and make friends. You can also create and share your own games using the built-in editor. Blockman GO has a simple, easy-to-use interface and supports several languages. You can download Blockman GO from the Google Play Store or the official website.
-
A free app with minigames, chat, and friends
-
One of the main features of Blockman GO is that it offers a wide range of minigames you can play with other players online. Some of the popular minigames are Bed Wars, Sky Wars, Murder Mystery, Egg Wars, Build Battle, Parkour, and more. Each minigame has its own rules, objectives, and rewards. You can also chat with other players in the game lobby or in-game. You can befriend other players by sending them friend requests or joining their clubs. You can also invite your friends to play with you in private rooms.
-
A platform for creating and sharing your own games
-
-
What's new in the latest version of Blockman GO?
-
The latest version of Blockman GO is Garena Blockman GO, a collaboration with Garena Free Fire, one of the most popular battle royale games in the world. Garena Blockman GO introduces some new features and improvements to the app, such as:
-
Garena Blockman GO: a collaboration with Garena Free Fire
-
Garena Blockman GO is a special version of Blockman GO that features some elements of Garena Free Fire. For example, you can get some exclusive Garena Free Fire skins and items in Blockman GO. You can also take part in some events and activities related to Garena Free Fire.
-
Frontline: a new 30 vs 30 multiplayer shooter
-
One of the new minigames in Garena Blockman GO is Frontline, a 30 vs 30 multiplayer shooting game. You can choose to join the blue team or the red team and fight the enemy team on a large map. You can use various weapons, vehicles, and tactics to win the game. You can also earn coins and experience points by playing Frontline.
-
Other features and improvements
-
Garena Blockman GO also brings some other features and improvements to the app, such as:
-
-
-
A new, more colorful and dynamic user interface.
-
A new ranking system that shows your level and progress in different minigames.
-
A new chat system that supports voice and text messages.
-
A new rewards system that gives you daily login bonuses, giveaways, and achievements.
-
A new shop system that lets you buy and sell items using coins or diamonds.
-
-
How to download and install the Blockman GO new version APK?
-
If you want to download and install the Blockman GO new version APK, you can follow these steps:
-
Steps to download and install from the official website
-
-
Go to the official Blockman GO website and click the "Download" button.
-
-
Wait for the download to finish and then open the APK file.
-
Allow installation from unknown sources if your device prompts you.
-
Follow the on-screen instructions to complete the installation.
-
Launch the app and enjoy playing the Blockman GO new version APK.
-
-
Tips to avoid malware and viruses
-
When downloading and installing the Blockman GO new version APK, you should be careful of some potential risks, such as malware and viruses. Here are some tips to avoid them:
-
-
Only download the APK file from the official website or the Google Play Store. Do not trust third-party sources that claim to offer the APK file.
-
Scan the APK file with reliable antivirus software before opening it.
-
Do not grant unnecessary permissions or access to the app.
-
Update the app regularly to get the latest security patches and bug fixes.
-
-
Why should you play the Blockman GO new version APK?
-
The Blockman GO new version APK is a great app for anyone who loves gaming, socializing, and creating. Here are some reasons why you should play it:
-
Enjoy a variety of fun and creative games
-
The Blockman GO new version APK offers a variety of fun and creative games that you can play with other players online. You can choose from different genres, such as action, adventure, puzzle, strategy, casual, and more. You can also try some of the new games that are added regularly, such as Frontline, Garena Free Fire, and more. You can also create your own games using the built-in editor and share them with other players.
-
Meet and chat with players from all over the world
-
-
Customize your avatar and decorate your home
-
The Blockman GO new version APK also lets you customize your avatar and decorate your home. You can choose from different skins, outfits, accessories, hairstyles, and more to make your avatar look unique. You can also buy or earn some Garena Free Fire items in Blockman GO. You can also decorate your home with different furniture, wallpapers, floors, windows, doors, and more. You can also invite other players to visit your home or theirs.
-
Conclusion
-
The Blockman GO new version APK is a free app that lets you play different games, chat with friends, and create your own worlds. It has many features and plenty of content for players of all ages and preferences. It also has some new features and improvements in the latest version, such as Garena Blockman GO, Frontline, and more. You can download and install the Blockman GO new version APK from the official website or the Google Play Store. You should also watch out for malware and viruses when downloading and installing the app. You should play the Blockman GO new version APK because it is fun, creative, and social.
-
Frequently asked questions
-
Here are some frequently asked questions about the Blockman GO new version APK:
-
-
Is the Blockman GO new version APK safe?
-
The Blockman GO new version APK is safe as long as you download it from the official website or the Google Play Store. You should also scan the APK file with reliable antivirus software before opening it. You should also avoid granting unnecessary permissions or access to the app. You should also update the app regularly to get the latest security patches and bug fixes.
-
How can I get coins and diamonds in the Blockman GO new version APK?
-
-
How can I create my own games in the Blockman GO new version APK?
-
You can create your own games in the Blockman GO new version APK using the built-in editor. You can access the editor by tapping the "Create" button on the main screen. You can use various tools and resources to design your own maps, characters, items, scripts, and more. You can also test your games before publishing them. You can share your games with other players by uploading them to the Blockman GO platform. You can also play other players' games and rate them.
-
How can I join or create a club in the Blockman GO new version APK?
-
A club is a group of players who share a common interest or goal in the Blockman GO new version APK. You can join or create a club by tapping the "Club" button on the main screen. You can search for existing clubs by name, category, or popularity. You can also create your own club by choosing a name, icon, description, and category. You can invite other players to join your club or accept their requests. You can also chat with your club members, send them gifts, and take part in club activities.
-
How can I contact customer service for the Blockman GO new version APK?
-
If you have any questions, problems, or feedback about the Blockman GO new version APK, you can contact customer service by tapping the "Feedback" button on the main screen. You can choose to send an email, a message, or a screenshot to customer service. You can also check the FAQ section for common problems and solutions.
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Acrorip 9.0 3 Completo Crack.md b/spaces/Benson/text-generation/Examples/Descargar Acrorip 9.0 3 Completo Crack.md
deleted file mode 100644
index 5c12410975eec37b75ef5fdaa16c4ab545ef81a4..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Acrorip 9.0 3 Completo Crack.md
+++ /dev/null
@@ -1,125 +0,0 @@
-
-
Descargar AcroRip 9.0 3 Grieta completa: Lo que usted necesita saber
-
Si está buscando un software que le pueda ayudar a imprimir blanco y color juntos en varios sustratos, como telas, plásticos, metales, cerámica y más, es posible que haya oído hablar de AcroRip. AcroRip es un software RIP (procesador de imágenes raster) que puede controlar los canales de tinta de su impresora y optimizar la calidad y velocidad de impresión. Está especialmente diseñado para impresoras planas UV e impresoras directas para prendas de vestir que utilizan cabezales de impresión Epson.
Sin embargo, AcroRip no es un software libre y requiere un dongle USB especial para ejecutarse. Esto podría hacer que algunas personas busquen una manera de descargar AcroRip 9.0 3 completo crack, que es la última versión del software a partir de ahora. ¿Pero vale la pena? ¿Cuáles son las características, beneficios, inconvenientes, alternativas, pasos de instalación y revisiones de AcroRip 9.0 3? En este artículo, responderemos estas preguntas y le ayudaremos a tomar una decisión informada.
-
Características de AcroRip 9.0 3
-
AcroRip 9.0 3 es una versión actualizada del software AcroRip anterior que tiene algunas características nuevas y mejoradas. Aquí están algunas de ellas:
-
-
Impresión en blanco y en color de una pasada: Esta característica le permite imprimir tinta blanca y de color al mismo tiempo, sin necesidad de dos pases para sustratos oscuros. Esto puede ahorrarle tiempo y mejorar la calidad de impresión.
-
Tiempos de carga más rápidos y compatibilidad con Windows 7/8/10: Esta característica hace que el software sea más sensible y estable, así como compatible con diferentes versiones del sistema operativo Windows.
-
Configuración de canal personalizado y función de onda: Esta función le permite cambiar los canales a pedido según sus requisitos personalizados. Por ejemplo, si una boquilla de color está obstruida, puede usar un canal blanco y usar tinta de color en ese canal. También puede ajustar la configuración de onda para reducir los problemas de bandas en las impresoras UV.
-
-
Compatibilidad ampliada de controladores y impresoras: Esta función admite más modelos de impresoras Epson, como Stylus Photo, EcoTank, SureColor, Stylus Pro, Expression, etc.
-
-
Beneficios de AcroRip 9.0 3
-
AcroRip 9 . 3 tiene muchos beneficios para los usuarios que quieren imprimir blanco y color juntos en varios sustratos. Estos son algunos de ellos:
-
-
-
Calidad y velocidad de impresión mejoradas: AcroRip 9.0 3 puede optimizar la calidad y la velocidad de impresión mediante el control de los canales de tinta, el uso de la impresión de una sola pasada y el uso de la configuración RIP boost. También puede reducir los problemas de bandas mediante el uso de la función de onda.
-
Menor consumo de tinta y costo: AcroRip 9.0 3 puede ahorrar tinta mediante el uso de ajustes de canal personalizados y el ajuste de la densidad de tinta y el tamaño de la gota. También puede usar tinta de color en canales blancos si es necesario, lo que puede reducir el desperdicio de tinta blanca.
-
Precisión de color mejorada y perfiles ICC: AcroRip 9.0 3 puede mejorar la precisión y consistencia del color mediante el uso de perfiles ICC y herramientas de gestión de color. También puede soportar CMYK, RGB y colores planos.
-
-
Inconvenientes de AcroRip 9.0 3
-
AcroRip 9.0 3 no es un software perfecto y tiene algunos inconvenientes que los usuarios deben tener en cuenta. Estos son algunos de ellos:
-
-
Necesidad de un dongle USB especial para ejecutar el software: AcroRip 9.0 3 requiere un dongle USB especial para activar el software y ejecutarlo en su computadora. Esto significa que necesita comprar el dongle desde el sitio web oficial o un distribuidor autorizado, y debe mantenerlo conectado cada vez que use el software. Si pierde o daña el dongle, es posible que ya no pueda usar el software.
-
-
-
Alternativas a AcroRip 9.0 3
-
Si no está satisfecho con AcroRip 9.0 3 o desea probar otras opciones, hay algunas alternativas que puede considerar. Estos son algunos de ellos:
-
-
Cadlink: Cadlink es un software RIP que admite varios tipos de impresoras, como UV, DTG, solvente, eco-solvente, etc. Tiene características como gestión de tinta blanca, creación de perfiles ICC, corrección de color, impresión de datos variables, etc.
-
EKprint: EKprint es un software RIP diseñado para impresoras DTG que utilizan cabezales de impresión Epson. Tiene características tales como impresión de un paso, cálculo de costo de tinta, verificación de boquilla, limpieza de la cabeza, etc.
-
Otras opciones de software RIP: Hay muchas otras opciones de software RIP entre las que puede elegir, dependiendo de su modelo de impresora, presupuesto y preferencias. Algunos ejemplos son Wasatch SoftRIP, Onyx RIPCenter, PhotoPrint Server Pro, etc.
-
-
Instalación de AcroRip 9.0 3
-
Si decide comprar AcroRip 9.0 3 desde el sitio web oficial o un distribuidor autorizado, tendrá que seguir estos pasos para instalar el software y el dongle:
-
-
Descargue el archivo de software desde el sitio web o el CD: Tendrá que descargar el archivo de software desde el sitio web o insertar el CD en su computadora.
-
Extraiga el archivo y ejecute el archivo setup.exe: Necesitará extraer el archivo usando un programa como WinRAR o WinZip y ejecutar el archivo setup.exe como administrador.
-
Siga las instrucciones del asistente de instalación: Tendrá que seguir las instrucciones del asistente de instalación y elegir su idioma, carpeta de destino, modelo de impresora, etc.
-
Conecte el dongle USB en su computadora: Necesitará conectar el dongle USB a su computadora antes de iniciar el software.
-
-
-
Grieta de AcroRip 9.0 3
-
Si tiene la tentación de descargar AcroRip 9.0 3 grieta completa de una fuente no oficial, como un sitio de torrent o un foro de crack, debe ser consciente de los riesgos y consecuencias de usar una versión agrietada del software. Estos son algunos de ellos:
-
-
Cuestiones legales: Descargar y usar una versión agrietada de AcroRip 9.0 3 es ilegal y viola los derechos de propiedad intelectual del desarrollador de software. Usted podría enfrentar acciones legales, multas o incluso tiempo en la cárcel si lo atrapan usando una versión rota del software.
-
Problemas de seguridad: Descargar y usar una versión agrietada de AcroRip 9.0 3 es arriesgado y expone su computadora a malware, virus, spyware, ransomware y otros programas maliciosos. Puede perder sus datos, comprometer su privacidad o dañar su sistema si instala una versión rota del software.
-
Problemas de rendimiento: Descargar y usar una versión agrietada de AcroRip 9.0 3 no es confiable e inestable. Es posible que experimente errores, bloqueos, bloqueos o problemas técnicos al usar una versión rota del software. También puede perderse actualizaciones, correcciones de errores y nuevas características que ofrece la versión oficial del software.
-
-
Por lo tanto, le recomendamos encarecidamente que evite descargar y usar una versión agrietada de AcroRip 9.0 3 y en su lugar compre la versión oficial en el sitio web o en un distribuidor autorizado.
-
Revisión de AcroRip 9.0 3
-
AcroRip 9.0 3 es un software RIP popular y ampliamente utilizado que tiene muchas críticas positivas de los usuarios que lo han probado. Sin embargo, también tiene algunas críticas negativas de los usuarios que han encontrado algunos problemas con él. Aquí hay algunos pros y contras de AcroRip 9.0 3 basado en la retroalimentación del usuario:
-
-
-
Pros
-
Contras
-
-
-
- Interfaz fácil de usar e intuitiva
-
- Caro y requiere un dongle
-
-
-
-
- Problemas antivirus y errores de configuración de lado a lado
-
-
-
- Tiempos de carga más rápidos y compatibilidad con Windows 7/8/10
-
- Atención al cliente limitada y documentación
-
-
-
- Configuración de canal personalizado y función de onda
-
- No compatible con Mac OS o Linux
-
-
-
- Configuración de impulso RIP y funcionalidad de alimentación de rollo
-
- No hay versión de prueba gratuita o demo disponible
-
-
-
- Compatibilidad ampliada de controladores y impresoras
-
- No hay comunidad en línea o foro para los usuarios
-
-
-
- Calidad y velocidad de impresión mejoradas
-
-
-
-
- Menor consumo y costo de tinta
-
-
-
-
- Precisión de color mejorada y perfiles ICC
-
-
-
Conclusión
-
En conclusión, AcroRip 9.0 3 es un software RIP que puede ayudarlo a imprimir blanco y color juntos en varios sustratos, como telas, plásticos, metales, cerámica y más. Tiene muchas características, beneficios, inconvenientes, alternativas, pasos de instalación y comentarios que necesita saber antes de decidirse a descargarlo.
-
Si desea descargar AcroRip 9.0 3 full crack, debe ser consciente de los riesgos y consecuencias de usar una versión agrietada del software. Es ilegal, arriesgado, poco fiable e inestable. Le recomendamos que compre la versión oficial del sitio web o de un distribuidor autorizado.
-
Esperamos que este artículo haya sido útil e informativo para usted. Si tiene alguna pregunta o comentario, no dude en dejarlos a continuación.
-
Preguntas frecuentes (preguntas frecuentes)
-
Aquí hay algunas preguntas frecuentes que puede tener sobre AcroRip 9.0 3:
-
-
¿Cuál es el precio de AcroRip 9.0 3?
-
El precio de AcroRip 9.0 3 varía según el vendedor y la región. Sin embargo, según el sitio web oficial, el precio es de $250 USD por un dongle.
-
¿Dónde puedo comprar AcroRip 9.0 3?
-
-
¿Cómo puedo actualizar AcroRip 9.0 3?
-
Puede actualizar AcroRip 9.0 3 descargando la última versión desde el sitio web o el CD e instalándolo en su computadora. Tendrá que mantener el dongle conectado cuando actualice el software.
-
¿Cuáles son los requisitos del sistema para AcroRip 9.0 3?
-
Los requisitos del sistema para AcroRip 9.0 3 son los siguientes:
-
-
Sistema operativo: Windows 7/8/10 (32 bits o 64 bits)
-
Procesador: Intel Core i3 o superior
-
Memoria: 4 GB de RAM o superior
-
Espacio en disco duro: 1 GB o superior
-
Pantalla: 1024 x 768 resolución o superior
-
Impresora: Impresora Epson con cabezal de impresión Epson
-
-
¿Cómo puedo contactar al equipo de soporte de AcroRip?
-
Puede ponerse en contacto con el equipo de soporte de AcroRip enviando un correo electrónico a acrorip@acrorip.com o rellenando el formulario de contacto en el sitio web. También puede consultar la sección de preguntas frecuentes y el manual del usuario en el sitio web para obtener más información.
-
¿Cómo puedo aprender más sobre AcroRip 9.0 3?
-
Puede obtener más información sobre AcroRip 9.0 3 visitando el sitio web oficial, viendo los videos tutoriales, leyendo los comentarios de los usuarios y uniéndose al grupo de Facebook para usuarios de AcroRip.
- 64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Caso Penal La Conspiracin Mod Apk Estrellas Ilimitadas.md b/spaces/Benson/text-generation/Examples/Descargar Caso Penal La Conspiracin Mod Apk Estrellas Ilimitadas.md
deleted file mode 100644
index e7eb552c0227d1e79a99f4695f19800a47a7209c..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Caso Penal La Conspiracin Mod Apk Estrellas Ilimitadas.md
+++ /dev/null
@@ -1,53 +0,0 @@
-
-
Descargar Garena Free Fire Mod v1.47.0 APK: Cómo obtener la última versión del popular juego Battle Royale
-
Si eres un fan de los juegos battle royale, debes haber oído hablar de Garena Free Fire, uno de los juegos más descargados y jugados en dispositivos Android e iOS. En este artículo, le mostraremos cómo descargar e instalar Garena Free Fire Mod v1.47.0 APK, una versión modificada del juego que le da acceso a recursos ilimitados, trucos y más.
-
¿Qué es el fuego libre de Garena?
-
Garena Free Fire es un juego multijugador online battle royale desarrollado por 111 Dots Studio y publicado por Garena para dispositivos Android e iOS. El juego fue lanzado en 2017 y desde entonces ha ganado más de 500 millones de descargas solo en Google Play Store.
-
descargar caso penal la conspiración mod apk estrellas ilimitadas
En Garena Free Fire, puedes elegir entre una gran variedad de personajes, armas, vehículos y objetos para sobrevivir en un mapa reducido con hasta 50 jugadores. Puedes jugar en solitario, dúo o modo escuadrón, y personalizar tu personaje con diferentes pieles, trajes, accesorios y mascotas. También puedes unirte o crear un gremio, chatear con otros jugadores, participar en eventos, misiones y torneos, y posicionarte en la clasificación global.
-
¿Qué es Garena Free Fire Mod v1.47.0 APK?
-
Garena Free Fire Mod v1.47.0 APK es una versión modificada del juego original que le da algunas características adicionales y ventajas que no están disponibles en la versión oficial. Por ejemplo, puedes obtener diamantes y monedas ilimitadas, que son las principales monedas en el juego que puedes usar para comprar artículos, actualizar a tu personaje o hacer girar la rueda de la suerte.
-
-
Cómo descargar e instalar Garena Free Fire Mod v1.47.0 APK?
-
Si desea probar Garena Free Fire Mod v1.47.0 APK, es necesario seguir estos sencillos pasos:
-
Paso 1: Descargar los archivos APK y OBB de una fuente de confianza
-
Lo primero que tienes que hacer es descargar los archivos APK y OBB de Garena Free Fire Mod v1.47.0 de una fuente confiable. Puedes usar este enlace o este enlace para obtenerlos.
-
El archivo APK es de unos 509 MB de tamaño, mientras que el archivo OBB es de unos 600 MB de tamaño. Asegúrate de tener suficiente espacio de almacenamiento en tu dispositivo antes de descargarlo.
-
Paso 2: Habilitar fuentes desconocidas en la configuración del dispositivo
-
Lo siguiente que debe hacer es habilitar fuentes desconocidas en la configuración del dispositivo. Esto le permitirá instalar aplicaciones que no son de Google Play Store o la App Store. Para hacer esto, vaya a la configuración de su dispositivo, luego a la seguridad, luego cambie la opción de fuentes desconocidas.
-
-
Si tienes Android 8.0 o superior, es posible que tengas que permitir la instalación de aplicaciones desde fuentes específicas. Para hacer esto, vaya a la configuración del dispositivo, luego las aplicaciones y las notificaciones, luego las avanzadas, luego el acceso especial a la aplicación, luego instale aplicaciones desconocidas, luego seleccione el navegador o el administrador de archivos que utilizó para descargar los archivos APK y OBB y luego active la opción permitir de esta fuente.
-
Paso 3: Instalar el archivo APK y extraer el archivo OBB a la carpeta Android/obb
-
Después de habilitar fuentes desconocidas, ahora puede instalar el archivo APK de Garena Free Fire Mod v1.47.0. Para hacer esto, busque el archivo APK en su dispositivo usando un administrador de archivos o un navegador, luego toque en él y siga las instrucciones en la pantalla.
-
-
Paso 4: Iniciar el juego y disfrutar de las características de mod
-
Ahora que ha instalado el archivo APK y extraído el archivo OBB, puede iniciar el juego y disfrutar de las características de mod. Para hacer esto, vaya a su cajón de aplicaciones o pantalla de inicio y toque en el icono de Garena Free Fire. Debería ver una pantalla de carga con un menú mod que le permite activar o desactivar varias características de la versión modded.
-
También puede acceder al menú mod tocando el icono flotante en la pantalla durante el juego. Puede ajustar la configuración de acuerdo a sus preferencias y jugar con diamantes y monedas ilimitadas, auto-objetivo y wallhack, desbloquear todos los personajes y pieles, sin retroceso y sin niebla, y más.
-
¿Cuáles son las características de Garena Free Fire Mod v1.47.0 APK?
-
Garena Free Fire Mod v1.47.0 APK tiene muchas características que lo hacen diferente de la versión original del juego. Estas son algunas de las principales características y beneficios de la versión modded:
-
Diamantes y monedas ilimitadas
-
Con Garena Free Fire Mod v1.47.0 APK, puede obtener diamantes y monedas ilimitadas en su cuenta. Los diamantes y las monedas son las principales monedas del juego que puedes usar para comprar objetos, mejorar a tu personaje o hacer girar la rueda de la suerte. Normalmente, tienes que gastar dinero real o completar tareas para conseguirlas, pero con esta versión modificada, puedes conseguirlas gratis y sin límite.
-
Auto-objetivo y wallhack
-
Otra característica de Garena Free Fire Mod v1.47.0 APK es el auto-objetivo y wallhack. El objetivo automático es un truco que te permite apuntar automáticamente a tus enemigos sin tener que ajustar manualmente tu punto de mira. Wallhack es un truco que le permite ver a sus enemigos a través de las paredes y otros obstáculos. Estos trucos pueden ayudarte a ganar más partidos y posicionarte más rápido al darte una ventaja injusta sobre tus oponentes.
-
Desbloquear todos los caracteres y skins
-
-
Sin retroceso y sin niebla
-
Otra característica de Garena Free Fire Mod v1.47.0 APK no hay retroceso y no hay niebla. El retroceso es una característica que hace que tu arma se mueva hacia arriba o hacia los lados cuando la disparas, afectando tu precisión y control. La niebla es una característica que reduce la visibilidad en ciertas áreas del mapa, lo que hace que sea más difícil detectar a tus enemigos u objetivos. Estas características pueden afectar negativamente a su juego por lo que es más difícil y frustrante. Con esta versión modificada, puedes eliminarlos completamente y disfrutar de un juego más suave y claro.
-
Conclusión
-
G arena Free Fire Mod v1.47.0 APK es una versión modificada del popular juego de batalla real que le da recursos ilimitados, trucos, y más. Es fácil de descargar e instalar, y funciona en la mayoría de los dispositivos Android. Con esta versión modificada, puedes disfrutar de un juego más divertido y emocionante con características como diamantes y monedas ilimitadas, puntero automático y wallhack, desbloquear todos los personajes y pieles, sin retroceso y sin niebla, y más.
-
Si usted está buscando una manera de darle vida a su experiencia Garena Free Fire, definitivamente debe probar Garena Free Fire Mod v1.47.0 APK. Es gratuito, seguro y actualizado regularmente. Sin embargo, también debes tener cuidado de no abusar de las características del mod o usarlas en partidos clasificados, ya que esto puede resultar en una prohibición o suspensión del juego. También debes respetar a otros jugadores y jugar limpio.
-
Entonces, ¿qué estás esperando? Descargar Garena Free Fire Mod v1.47.0 APK hoy y disfrutar del último juego de batalla real en su dispositivo Android!
-
Preguntas frecuentes
-
Aquí hay algunas preguntas frecuentes sobre Garena Free Fire Mod v1.47.0 APK:
-
Q: ¿Es seguro usar Garena Free Fire Mod v1.47.0 APK?
-
-
Q: ¿Garena Free Fire Mod v1.47.0 APK es compatible con mi dispositivo?
-
A: Garena Free Fire Mod v1.47.0 APK es compatible con la mayoría de los dispositivos Android que tienen Android 4.0.3 o superior y al menos 2 GB de RAM. Sin embargo, es posible que algunos dispositivos no soporten las características de mod o ejecuten el juego sin problemas debido a limitaciones de hardware o conflictos de software.
-
Q: ¿Cómo puedo actualizar Garena Free Fire Mod v1.47.0 APK?
-
A: Para actualizar Garena Free Fire Mod v1.47.0 APK, es necesario descargar la última versión de los archivos APK y OBB de una fuente de confianza y seguir los mismos pasos que instalarlo por primera vez. También debes hacer una copia de seguridad de los datos del juego antes de actualizarlo para evitar perder tu progreso o configuración.
-
Q: ¿Puedo jugar Garena Free Fire Mod v1.47.0 APK con mis amigos?
-
A: Sí, puedes jugar Garena Free Fire Mod v1.47.0 APK con tus amigos, siempre y cuando también tienen la misma versión modded del juego instalado en sus dispositivos. Puedes unirte o crear un equipo con ellos y jugar juntos en cualquier modo del juego.
-
Q: ¿Puedo utilizar Garena Free Fire Mod v1.47.0 APK en los partidos clasificados?
-
A: No, no debe utilizar Garena Free Fire Mod v1.47.0 APK en los partidos clasificados, ya que esto puede resultar en una prohibición o suspensión del juego por violar los términos del servicio o hacer trampa. Solo debes usar las características mod en partidos casuales o habitaciones personalizadas para fines de diversión y entretenimiento.
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/command_context.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/command_context.py
deleted file mode 100644
index 139995ac3f109a82664e4913f7ebc32ecf7617e1..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/command_context.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from contextlib import ExitStack, contextmanager
-from typing import ContextManager, Generator, TypeVar
-
-_T = TypeVar("_T", covariant=True)
-
-
-class CommandContextMixIn:
- def __init__(self) -> None:
- super().__init__()
- self._in_main_context = False
- self._main_context = ExitStack()
-
- @contextmanager
- def main_context(self) -> Generator[None, None, None]:
- assert not self._in_main_context
-
- self._in_main_context = True
- try:
- with self._main_context:
- yield
- finally:
- self._in_main_context = False
-
- def enter_context(self, context_provider: ContextManager[_T]) -> _T:
- assert self._in_main_context
-
- return self._main_context.enter_context(context_provider)
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/control.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/control.py
deleted file mode 100644
index 88fcb9295164f4e18827ef61fff6723e94ef7381..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/control.py
+++ /dev/null
@@ -1,225 +0,0 @@
-import sys
-import time
-from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Union
-
-if sys.version_info >= (3, 8):
- from typing import Final
-else:
- from pip._vendor.typing_extensions import Final # pragma: no cover
-
-from .segment import ControlCode, ControlType, Segment
-
-if TYPE_CHECKING:
- from .console import Console, ConsoleOptions, RenderResult
-
-STRIP_CONTROL_CODES: Final = [
- 7, # Bell
- 8, # Backspace
- 11, # Vertical tab
- 12, # Form feed
- 13, # Carriage return
-]
-_CONTROL_STRIP_TRANSLATE: Final = {
- _codepoint: None for _codepoint in STRIP_CONTROL_CODES
-}
-
-CONTROL_ESCAPE: Final = {
- 7: "\\a",
- 8: "\\b",
- 11: "\\v",
- 12: "\\f",
- 13: "\\r",
-}
-
-CONTROL_CODES_FORMAT: Dict[int, Callable[..., str]] = {
- ControlType.BELL: lambda: "\x07",
- ControlType.CARRIAGE_RETURN: lambda: "\r",
- ControlType.HOME: lambda: "\x1b[H",
- ControlType.CLEAR: lambda: "\x1b[2J",
- ControlType.ENABLE_ALT_SCREEN: lambda: "\x1b[?1049h",
- ControlType.DISABLE_ALT_SCREEN: lambda: "\x1b[?1049l",
- ControlType.SHOW_CURSOR: lambda: "\x1b[?25h",
- ControlType.HIDE_CURSOR: lambda: "\x1b[?25l",
- ControlType.CURSOR_UP: lambda param: f"\x1b[{param}A",
- ControlType.CURSOR_DOWN: lambda param: f"\x1b[{param}B",
- ControlType.CURSOR_FORWARD: lambda param: f"\x1b[{param}C",
- ControlType.CURSOR_BACKWARD: lambda param: f"\x1b[{param}D",
- ControlType.CURSOR_MOVE_TO_COLUMN: lambda param: f"\x1b[{param+1}G",
- ControlType.ERASE_IN_LINE: lambda param: f"\x1b[{param}K",
- ControlType.CURSOR_MOVE_TO: lambda x, y: f"\x1b[{y+1};{x+1}H",
- ControlType.SET_WINDOW_TITLE: lambda title: f"\x1b]0;{title}\x07",
-}
-
-
-class Control:
- """A renderable that inserts a control code (non printable but may move cursor).
-
- Args:
- *codes (str): Positional arguments are either a :class:`~rich.segment.ControlType` enum or a
- tuple of ControlType and an integer parameter
- """
-
- __slots__ = ["segment"]
-
- def __init__(self, *codes: Union[ControlType, ControlCode]) -> None:
- control_codes: List[ControlCode] = [
- (code,) if isinstance(code, ControlType) else code for code in codes
- ]
- _format_map = CONTROL_CODES_FORMAT
- rendered_codes = "".join(
- _format_map[code](*parameters) for code, *parameters in control_codes
- )
- self.segment = Segment(rendered_codes, None, control_codes)
-
- @classmethod
- def bell(cls) -> "Control":
- """Ring the 'bell'."""
- return cls(ControlType.BELL)
-
- @classmethod
- def home(cls) -> "Control":
- """Move cursor to 'home' position."""
- return cls(ControlType.HOME)
-
- @classmethod
- def move(cls, x: int = 0, y: int = 0) -> "Control":
- """Move cursor relative to current position.
-
- Args:
- x (int): X offset.
- y (int): Y offset.
-
- Returns:
- ~Control: Control object.
-
- """
-
- def get_codes() -> Iterable[ControlCode]:
- control = ControlType
- if x:
- yield (
- control.CURSOR_FORWARD if x > 0 else control.CURSOR_BACKWARD,
- abs(x),
- )
- if y:
- yield (
- control.CURSOR_DOWN if y > 0 else control.CURSOR_UP,
- abs(y),
- )
-
- control = cls(*get_codes())
- return control
-
- @classmethod
- def move_to_column(cls, x: int, y: int = 0) -> "Control":
- """Move to the given column, optionally add offset to row.
-
- Returns:
- x (int): absolute x (column)
- y (int): optional y offset (row)
-
- Returns:
- ~Control: Control object.
- """
-
- return (
- cls(
- (ControlType.CURSOR_MOVE_TO_COLUMN, x),
- (
- ControlType.CURSOR_DOWN if y > 0 else ControlType.CURSOR_UP,
- abs(y),
- ),
- )
- if y
- else cls((ControlType.CURSOR_MOVE_TO_COLUMN, x))
- )
-
- @classmethod
- def move_to(cls, x: int, y: int) -> "Control":
- """Move cursor to absolute position.
-
- Args:
- x (int): x offset (column)
- y (int): y offset (row)
-
- Returns:
- ~Control: Control object.
- """
- return cls((ControlType.CURSOR_MOVE_TO, x, y))
-
- @classmethod
- def clear(cls) -> "Control":
- """Clear the screen."""
- return cls(ControlType.CLEAR)
-
- @classmethod
- def show_cursor(cls, show: bool) -> "Control":
- """Show or hide the cursor."""
- return cls(ControlType.SHOW_CURSOR if show else ControlType.HIDE_CURSOR)
-
- @classmethod
- def alt_screen(cls, enable: bool) -> "Control":
- """Enable or disable alt screen."""
- if enable:
- return cls(ControlType.ENABLE_ALT_SCREEN, ControlType.HOME)
- else:
- return cls(ControlType.DISABLE_ALT_SCREEN)
-
- @classmethod
- def title(cls, title: str) -> "Control":
- """Set the terminal window title
-
- Args:
- title (str): The new terminal window title
- """
- return cls((ControlType.SET_WINDOW_TITLE, title))
-
- def __str__(self) -> str:
- return self.segment.text
-
- def __rich_console__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> "RenderResult":
- if self.segment.text:
- yield self.segment
-
-
-def strip_control_codes(
- text: str, _translate_table: Dict[int, None] = _CONTROL_STRIP_TRANSLATE
-) -> str:
- """Remove control codes from text.
-
- Args:
- text (str): A string possibly contain control codes.
-
- Returns:
- str: String with control codes removed.
- """
- return text.translate(_translate_table)
-
-
-def escape_control_codes(
- text: str,
- _translate_table: Dict[int, str] = CONTROL_ESCAPE,
-) -> str:
- """Replace control codes with their "escaped" equivalent in the given text.
- (e.g. "\b" becomes "\\b")
-
- Args:
- text (str): A string possibly containing control codes.
-
- Returns:
- str: String with control codes replaced with their escaped version.
- """
- return text.translate(_translate_table)
-
-
-if __name__ == "__main__": # pragma: no cover
- from pip._vendor.rich.console import Console
-
- console = Console()
- console.print("Look at the title of your terminal window ^")
- # console.print(Control((ControlType.SET_WINDOW_TITLE, "Hello, world!")))
- for i in range(10):
- console.set_window_title("🚀 Loading" + "." * i)
- time.sleep(0.5)
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/scope.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/scope.py
deleted file mode 100644
index c9d134cc3cedae929e5bef2b5547f7e33dc10a52..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/scope.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from collections.abc import Mapping
-from typing import TYPE_CHECKING, Any, Optional, Tuple
-
-from .highlighter import ReprHighlighter
-from .panel import Panel
-from .pretty import Pretty
-from .table import Table
-from .text import Text, TextType
-
-if TYPE_CHECKING:
- from .console import ConsoleRenderable
-
-
-def render_scope(
- scope: "Mapping[str, Any]",
- *,
- title: Optional[TextType] = None,
- sort_keys: bool = True,
- indent_guides: bool = False,
- max_length: Optional[int] = None,
- max_string: Optional[int] = None,
-) -> "ConsoleRenderable":
- """Render python variables in a given scope.
-
- Args:
- scope (Mapping): A mapping containing variable names and values.
- title (str, optional): Optional title. Defaults to None.
- sort_keys (bool, optional): Enable sorting of items. Defaults to True.
- indent_guides (bool, optional): Enable indentation guides. Defaults to False.
- max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
- Defaults to None.
- max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
-
- Returns:
- ConsoleRenderable: A renderable object.
- """
- highlighter = ReprHighlighter()
- items_table = Table.grid(padding=(0, 1), expand=False)
- items_table.add_column(justify="right")
-
- def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
- """Sort special variables first, then alphabetically."""
- key, _ = item
- return (not key.startswith("__"), key.lower())
-
- items = sorted(scope.items(), key=sort_items) if sort_keys else scope.items()
- for key, value in items:
- key_text = Text.assemble(
- (key, "scope.key.special" if key.startswith("__") else "scope.key"),
- (" =", "scope.equals"),
- )
- items_table.add_row(
- key_text,
- Pretty(
- value,
- highlighter=highlighter,
- indent_guides=indent_guides,
- max_length=max_length,
- max_string=max_string,
- ),
- )
- return Panel.fit(
- items_table,
- title=title,
- border_style="scope.border",
- padding=(0, 1),
- )
-
-
-if __name__ == "__main__": # pragma: no cover
- from pip._vendor.rich import print
-
- print()
-
- def test(foo: float, bar: float) -> None:
- list_of_things = [1, 2, 3, None, 4, True, False, "Hello World"]
- dict_of_things = {
- "version": "1.1",
- "method": "confirmFruitPurchase",
- "params": [["apple", "orange", "mangoes", "pomelo"], 1.123],
- "id": "194521489",
- }
- print(render_scope(locals(), title="[i]locals", sort_keys=False))
-
- test(20.3423, 3.1427)
- print()
diff --git a/spaces/CVMX-jaca-tonos/Identificar-lenguas-y-frases/NOTES.md b/spaces/CVMX-jaca-tonos/Identificar-lenguas-y-frases/NOTES.md
deleted file mode 100644
index d332893cc7009aa432b1512dc77aac3d8fda6d1d..0000000000000000000000000000000000000000
--- a/spaces/CVMX-jaca-tonos/Identificar-lenguas-y-frases/NOTES.md
+++ /dev/null
@@ -1,65 +0,0 @@
-
-
-# Things that might be relevant
-
-## Trained models
-
-ESPnet model for Yoloxochitl Mixtec
- - Huggingface Hub page https://huggingface.co/espnet/ftshijt_espnet2_asr_yolo_mixtec_transformer
- - Model source code https://github.com/espnet/espnet/tree/master/egs/yoloxochitl_mixtec/asr1
- - Colab notebook to setup and apply the model https://colab.research.google.com/drive/1ieoW2b3ERydjaaWuhVPBP_v2QqqWsC1Q?usp=sharing
-
-Coqui model for Yoloxochitl Mixtec
- - Huggingface Hub page
- - Coqui page https://coqui.ai/mixtec/jemeyer/v1.0.0
- - Colab notebook to setup and apply the model https://colab.research.google.com/drive/1b1SujEGC_F3XhvUCuUyZK_tyUkEaFZ7D?usp=sharing#scrollTo=6IvRFke4Ckpz
-
-Spanish ASR models
- - XLS-R model based on CV8 with LM https://huggingface.co/jonatasgrosman/wav2vec2-xls-r-1b-spanish
- - XLSR model based on CV6 with LM https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-spanish
- - XLSR model based on Librispeech https://huggingface.co/IIC/wav2vec2-spanish-multilibrispeech
-
-Speechbrain Language identification on Common Language (from Common Voice 6/7?)
- - source code https://github.com/speechbrain/speechbrain/tree/develop/recipes/CommonLanguage
- - HF Hub model page https://huggingface.co/speechbrain/lang-id-commonlanguage_ecapa
- - HF Hub space https://huggingface.co/spaces/akhaliq/Speechbrain-audio-classification
-
-Speechbrain Language identification on VoxLingua
- - source code https://github.com/speechbrain/speechbrain/tree/develop/recipes/VoxLingua107/lang_id
- - HF Hub model page https://huggingface.co/speechbrain/lang-id-voxlingua107-ecapa
-
-
-## Corpora
-
-OpenSLR89 https://www.openslr.org/89/
-
-Common Language https://huggingface.co/datasets/common_language
-
-VoxLingua http://bark.phon.ioc.ee/voxlingua107/
-
-Multilibrispeech https://huggingface.co/datasets/multilingual_librispeech
-
-
-# Possible demos
-
-## Simple categorization of utterances
-
-A few example files are provided for each language, and the user can record their own.
-The predicted confidence of each class label is shown.
-
-## Segmentation and identification
-
-Recordings with alternating languages in a single audio file, provided examples or the user can record.
-Some voice activity detection to split the audio, then predict language of each piece
-
-## Identication and transcription
-
-Example files for each language separately.
-The lang-id model predicts what language it is.
-The corresponding ASR model produces a transcript.
-
-## Segmentation, identification and transcription
-
-Recordings with alternating languages in a single audio file.
-Use voice activity detection to split the audio, then predict the language of each piece
-Use the corresponding ASR model to produce a transcript of each piece to display.
\ No newline at end of file
diff --git a/spaces/CVPR/LIVE/thrust/generate_mk.py b/spaces/CVPR/LIVE/thrust/generate_mk.py
deleted file mode 100644
index 84071338ccfdd99be55027c8046fe46c56e5a65b..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/generate_mk.py
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/usr/bin/env python
-# Generate set of projects mk files.
-# Usage: python generate_mk.py PROJECTS_MK_DIR THRUST_SOURCE_DIR
-# The program scans through unit tests and examples in THRUST_SOURCE_DIR
-# and generates project mk for each of the tests and examples in PROJECTS_MK_DIR
-# A single example or unit test source file generates its own executable
-# This program is called by a top level Makefile, but can also be used stand-alone for debugging
-# This program also generates testing.mk, examples.mk and dependencies.mk
-from __future__ import print_function
-import sys
-import shutil as sh
-import os
-import glob
-import re
-
-test_template = """
-TEST_SRC := %(TEST_SRC)s
-TEST_NAME := %(TEST_NAME)s
-include $(ROOTDIR)/thrust/internal/build/generic_test.mk
-"""
-example_template = """
-EXAMPLE_SRC := %(EXAMPLE_SRC)s
-EXAMPLE_NAME := %(EXAMPLE_NAME)s
-include $(ROOTDIR)/thrust/internal/build/generic_example.mk
-"""
-
-def Glob(pattern, directory,exclude='\B'):
- src = glob.glob(os.path.join(directory,pattern))
- p = re.compile(exclude)
- src = [s for s in src if not p.match(s)]
- return src
-
-
-def generate_test_mk(mk_path, test_path, group, TEST_DIR):
- print('Generating makefiles in "'+mk_path+'" for tests in "'+test_path+'"')
- src_cu = Glob("*.cu", test_path, ".*testframework.cu$")
- src_cxx = Glob("*.cpp", test_path)
- src_cu.sort();
- src_cxx.sort();
- src_all = src_cu + src_cxx;
- tests_all = []
- dependencies_all = []
- for s in src_all:
- fn = os.path.splitext(os.path.basename(s));
- t = "thrust."+group+"."+fn[0]
- e = fn[1]
- mkfile = test_template % {"TEST_SRC" : s, "TEST_NAME" : t}
- f = open(os.path.join(mk_path,t+".mk"), 'w')
- f.write(mkfile)
- f.close()
- tests_all.append(os.path.join(mk_path,t))
- dependencies_all.append(t+": testframework")
- return [tests_all, dependencies_all]
-
-def generate_example_mk(mk_path, example_path, group, EXAMPLE_DIR):
- print('Generating makefiles in "'+mk_path+'" for examples in "'+example_path+'"')
- src_cu = Glob("*.cu", example_path)
- src_cxx = Glob("*.cpp", example_path)
- src_cu.sort();
- src_cxx.sort();
- src_all = src_cu + src_cxx;
- examples_all = []
- for s in src_all:
- fn = os.path.splitext(os.path.basename(s));
- t = "thrust."+group+"."+fn[0]
- e = fn[1]
- mkfile = example_template % {"EXAMPLE_SRC" : s, "EXAMPLE_NAME" : t}
- f = open(os.path.join(mk_path,t+".mk"), 'w')
- f.write(mkfile)
- f.close()
- examples_all.append(os.path.join(mk_path,t))
- return examples_all
-
-
-## relpath : backported from os.relpath form python 2.6+
-def relpath(path, start):
- """Return a relative version of a path"""
-
- import posixpath
- if not path:
- raise ValueError("no path specified")
- start_list = posixpath.abspath(start).split(posixpath.sep)
- path_list = posixpath.abspath(path).split(posixpath.sep)
- # Work out how much of the filepath is shared by start and path.
- i = len(posixpath.commonprefix([start_list, path_list]))
- rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:]
- if not rel_list:
- return posixpath.curdir
- return posixpath.join(*rel_list)
-
-mk_path=sys.argv[1]
-REL_DIR="../../"
-if (len(sys.argv) > 2):
- root_path=sys.argv[2];
- mk_path = relpath(mk_path, root_path)
- REL_DIR = relpath(root_path,mk_path)
-
-try:
- sh.rmtree(mk_path)
-except:
- pass
-os.makedirs(mk_path)
-
-tests_all, dependencies_all = generate_test_mk(mk_path, "testing/", "test", REL_DIR)
-tests_cu, dependencies_cu = generate_test_mk(mk_path, "testing/cuda/", "test.cuda", REL_DIR)
-tests_all.extend(tests_cu)
-dependencies_all.extend(dependencies_cu)
-
-testing_mk = ""
-
-for t in tests_all:
- testing_mk += "PROJECTS += "+t+"\n"
-testing_mk += "PROJECTS += internal/build/testframework\n"
-
-
-f = open(os.path.join(mk_path,"testing.mk"),'w')
-f.write(testing_mk)
-f.close()
-
-dependencies_mk = ""
-for d in dependencies_all:
- dependencies_mk += d + "\n"
-
-f = open(os.path.join(mk_path,"dependencies.mk"),'w')
-f.write(dependencies_mk)
-f.close()
-
-
-examples_mk = ""
-examples_all = generate_example_mk(mk_path, "examples/", "example", REL_DIR)
-examples_cuda = generate_example_mk(mk_path, "examples/cuda/", "example.cuda", REL_DIR)
-examples_all.extend(examples_cuda)
-for e in examples_all:
- examples_mk += "PROJECTS += "+e+"\n"
-
-f = open(os.path.join(mk_path,"examples.mk"),'w')
-f.write(examples_mk)
-f.close()
-
-
-
-
-
-
-
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/transform_reduce.h b/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/transform_reduce.h
deleted file mode 100644
index a8736bd75d06e54d9158baeb2504162d75312885..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/transform_reduce.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include
-
-// this system inherits transform_reduce
-#include
-
diff --git a/spaces/CVPR/WALT/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py b/spaces/CVPR/WALT/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
deleted file mode 100644
index 80c25bb8fde7844c994bfc1f4ae1a2d960cbf3d6..0000000000000000000000000000000000000000
--- a/spaces/CVPR/WALT/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
+++ /dev/null
@@ -1,83 +0,0 @@
-from mmcv.cnn.bricks import build_plugin_layer
-from mmcv.runner import force_fp32
-
-from mmdet.models.builder import ROI_EXTRACTORS
-from .base_roi_extractor import BaseRoIExtractor
-
-
-@ROI_EXTRACTORS.register_module()
-class GenericRoIExtractor(BaseRoIExtractor):
- """Extract RoI features from all level feature maps levels.
-
- This is the implementation of `A novel Region of Interest Extraction Layer
- for Instance Segmentation `_.
-
- Args:
- aggregation (str): The method to aggregate multiple feature maps.
- Options are 'sum', 'concat'. Default: 'sum'.
- pre_cfg (dict | None): Specify pre-processing modules. Default: None.
- post_cfg (dict | None): Specify post-processing modules. Default: None.
- kwargs (keyword arguments): Arguments that are the same
- as :class:`BaseRoIExtractor`.
- """
-
- def __init__(self,
- aggregation='sum',
- pre_cfg=None,
- post_cfg=None,
- **kwargs):
- super(GenericRoIExtractor, self).__init__(**kwargs)
-
- assert aggregation in ['sum', 'concat']
-
- self.aggregation = aggregation
- self.with_post = post_cfg is not None
- self.with_pre = pre_cfg is not None
- # build pre/post processing modules
- if self.with_post:
- self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
- if self.with_pre:
- self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
-
- @force_fp32(apply_to=('feats', ), out_fp16=True)
- def forward(self, feats, rois, roi_scale_factor=None):
- """Forward function."""
- if len(feats) == 1:
- return self.roi_layers[0](feats[0], rois)
-
- out_size = self.roi_layers[0].output_size
- num_levels = len(feats)
- roi_feats = feats[0].new_zeros(
- rois.size(0), self.out_channels, *out_size)
-
- # some times rois is an empty tensor
- if roi_feats.shape[0] == 0:
- return roi_feats
-
- if roi_scale_factor is not None:
- rois = self.roi_rescale(rois, roi_scale_factor)
-
- # mark the starting channels for concat mode
- start_channels = 0
- for i in range(num_levels):
- roi_feats_t = self.roi_layers[i](feats[i], rois)
- end_channels = start_channels + roi_feats_t.size(1)
- if self.with_pre:
- # apply pre-processing to a RoI extracted from each layer
- roi_feats_t = self.pre_module(roi_feats_t)
- if self.aggregation == 'sum':
- # and sum them all
- roi_feats += roi_feats_t
- else:
- # and concat them along channel dimension
- roi_feats[:, start_channels:end_channels] = roi_feats_t
- # update channels starting position
- start_channels = end_channels
- # check if concat channels match at the end
- if self.aggregation == 'concat':
- assert start_channels == self.out_channels
-
- if self.with_post:
- # apply post-processing before return the result
- roi_feats = self.post_module(roi_feats)
- return roi_feats
diff --git a/spaces/ChristopherMarais/Andrew_Alpha/README.md b/spaces/ChristopherMarais/Andrew_Alpha/README.md
deleted file mode 100644
index 7ca08e015d07910a8f6abddf12db2ad62976559a..0000000000000000000000000000000000000000
--- a/spaces/ChristopherMarais/Andrew_Alpha/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: Andrew Alpha
-emoji: 👁
-colorFrom: green
-colorTo: pink
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: false
-license: other
----
-
-This is a proof-of-concept version of our Artificial Intelligence model to classify images of bark and ambrosia beetles. As an input, please use an image of a specimen, or a group of specimens, ideally in ethanol with a white background.
-
-This proof-of-concept model has been trained on a preliminary sample of 12 species: Coccotypes dactyliperda, Hylesinus varius, Monarthrum fasciatum, Phloeosinus dentatus, Pityophthorus juglandis, Platypus cylindrus, Pycnarthrum hispidium, Scolytodes schwarzi, Xyleborinus saxesenii, Xyleborus affinis, Xylosandrus compactus, and Xylosandrus crassiusculus.
-
-For correct interpretation of the results, it is important to consider not just the suggested name, but also the associated probability. Identification of other species is coming soon, as soon as they are added to the training set.
-
-You can find example photos [here](https://ambrosiasymbiosis.org/automated_identification/examples.html)
diff --git a/spaces/CofAI/chat/g4f/Provider/Providers/Phind.py b/spaces/CofAI/chat/g4f/Provider/Providers/Phind.py
deleted file mode 100644
index 9fa8ec821f701d7841432e498a11ac9dd017978c..0000000000000000000000000000000000000000
--- a/spaces/CofAI/chat/g4f/Provider/Providers/Phind.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import os
-import json
-import time
-import subprocess
-
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://phind.com'
-model = ['gpt-4']
-supports_stream = True
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-
- path = os.path.dirname(os.path.realpath(__file__))
- config = json.dumps({
- 'model': model,
- 'messages': messages}, separators=(',', ':'))
-
- cmd = ['python', f'{path}/helpers/phind.py', config]
-
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
- for line in iter(p.stdout.readline, b''):
- if b'Just a moment...' in line:
- os.system('clear' if os.name == 'posix' else 'cls')
- yield 'Clouflare error, please try again...'
- os._exit(0)
-
- else:
- if b'ping - 2023-' in line:
- continue
-
- yield line.decode('cp1251') #[:-1]
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/alfashape.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/alfashape.py
deleted file mode 100644
index 9043c54b2cc8a27a37702649c8acff865f741790..0000000000000000000000000000000000000000
--- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/alfashape.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import numpy as np
-from scipy.spatial import Delaunay
-from .area_of_polygon import area_of_polygon_crd
-import networkx as nx
-
-def sqrt_sum(a, b):
- x = (a[0]-b[0])
- y = (a[1]-b[1])
- return np.sqrt(x*x+y*y)
-
-def shapeToSomePolygons(shape):
- G = nx.Graph()
- allnodes = set()
- for line in shape:
- G.add_nodes_from(line)
- G.add_edge(line[0], line[1])
- allnodes.add(line[0])
- allnodes.add(line[1])
-
- result = []
-
- while allnodes:
- node = allnodes.pop()
- new_node = next(iter(G[node]), None)
- if not new_node: continue
-
- G.remove_edge(node, new_node)
- temp = nx.shortest_path(G, node, new_node)
- for j,t in enumerate(temp):
- if t in allnodes:
- allnodes.remove(t)
- result.append(temp)
- return result
-
-def getAlfaShapes(pts,alfas=1):
- tri_ind = [(0,1),(1,2),(2,0)]
- tri = Delaunay(pts)
- lenghts={}
- for s in tri.simplices:
- for ind in tri_ind:
- a = pts[s[ind[0]]]
- b = pts[s[ind[1]]]
- # print('a---', a)
- # print('b---', b)
- line = (a, b)
- # line = ((a[0], a[1]), (b[0], b[1]))
- lenghts[line] = sqrt_sum(a, b)
-
- ls = sorted(lenghts.values())
-
- mean_length = np.mean(ls)
- mean_length_index = ls.index(next(filter(lambda x: x>=mean_length, ls)))
- magic_numbers = [ls[i] for i in range(mean_length_index, len(ls))]
- magic_numbers[0] = 0
- sum_magic = np.sum(magic_numbers)
- for i in range(2, len(magic_numbers)):
- magic_numbers[i] += magic_numbers[i-1]
- magic_numbers = [m /sum_magic for m in magic_numbers]
-
- rez = []
- for alfa in alfas:
- i = magic_numbers.index(next(filter(lambda z: z > alfa, magic_numbers), magic_numbers[-1]))
- av_length = ls[mean_length_index+i]
-
- lines = {}
-
- for s in tri.simplices:
- used = True
- for ind in tri_ind:
- if lenghts[(pts[s[ind[0]]], pts[s[ind[1]]])] > av_length:
- used = False
- break
- if used == False: continue
-
- for ind in tri_ind:
- i,j= s[ind[0]],s[ind[1]]
- line = (pts[min(i,j)], pts[max(i,j)])
- lines[line] = line in lines
-
- good_lines = []
- for v in lines:
- if not lines[v]:
- good_lines.append(v)
-
- result = shapeToSomePolygons(good_lines)
- result.sort(key=area_of_polygon_crd, reverse=True)
- rez.append(result)
- return rez
-
diff --git a/spaces/Dagfinn1962/stablediffusion-models/main.css b/spaces/Dagfinn1962/stablediffusion-models/main.css
deleted file mode 100644
index 9d9e5d256b872645ee28c1912e2a9d476131f51a..0000000000000000000000000000000000000000
--- a/spaces/Dagfinn1962/stablediffusion-models/main.css
+++ /dev/null
@@ -1,57 +0,0 @@
-body {
- background-color: #214d09;
- width: 100%;
- color: #FFFFFF;
-}
-
-h3 {
- color: #FFFFF;
- text-align: center;
- font-family: verdana;
- font-size: 24px;
- border: 1px solid #FFFFFF;
- border-radius: 10px;
-}
-
-p {
- font-family: verdana;
- font-size: 14px;
-}
-
-label {
- font-family: verdana;
- color: #000000;
- font-weight: 700;
- font-size: 14px;
- border: 1px solid #000000;
-}
-
-gr.Textbox {
- font-family: verdana;
- background-color: #279700;
- color: #000000;
- font-weight: 700;
- font-size: 14px;
- border: 1px solid #FFFFFF;
- border-radius: 6px;
-}
-
-gr.Botton {
- font-family: verdana;
- background-color: #279700;
- color: #FFFFFF;
- font-weight: 700;
- font-size: 14px;
- border: 1px solid #000000;
- border-radius: 6px;
-}
-
-a a:active a.hover
- {
- font-family: verdana;
- color: #572430;
- text-decoration: none;
- font-weight: 700;
- font-size: 14px;
-
-}
\ No newline at end of file
diff --git a/spaces/DaleChen/AutoGPT/autogpt/config/config.py b/spaces/DaleChen/AutoGPT/autogpt/config/config.py
deleted file mode 100644
index 4b53df10e8d2832be7ffb321d9036aec5a47a79d..0000000000000000000000000000000000000000
--- a/spaces/DaleChen/AutoGPT/autogpt/config/config.py
+++ /dev/null
@@ -1,251 +0,0 @@
-"""Configuration class to store the state of bools for different scripts access."""
-import os
-
-import openai
-import yaml
-from colorama import Fore
-from dotenv import load_dotenv
-
-from autogpt.config.singleton import Singleton
-
-load_dotenv(verbose=True)
-
-
-class Config(metaclass=Singleton):
- """
- Configuration class to store the state of bools for different scripts access.
- """
-
- def __init__(self) -> None:
- """Initialize the Config class"""
- self.debug_mode = False
- self.continuous_mode = False
- self.continuous_limit = 0
- self.speak_mode = False
- self.skip_reprompt = False
- self.allow_downloads = False
- self.skip_news = False
-
- self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
- self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
- self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
- self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
- self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
- self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192))
-
- self.openai_api_key = os.getenv("OPENAI_API_KEY")
- self.temperature = float(os.getenv("TEMPERATURE", "1"))
- self.use_azure = os.getenv("USE_AZURE") == "True"
- self.execute_local_commands = (
- os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
- )
- self.restrict_to_workspace = (
- os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True"
- )
-
- if self.use_azure:
- self.load_azure_config()
- openai.api_type = self.openai_api_type
- openai.api_base = self.openai_api_base
- openai.api_version = self.openai_api_version
-
- self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
- self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
- self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")
-
- self.use_mac_os_tts = False
- self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
-
- self.use_brian_tts = False
- self.use_brian_tts = os.getenv("USE_BRIAN_TTS")
-
- self.github_api_key = os.getenv("GITHUB_API_KEY")
- self.github_username = os.getenv("GITHUB_USERNAME")
-
- self.google_api_key = os.getenv("GOOGLE_API_KEY")
- self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")
-
- self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
- self.pinecone_region = os.getenv("PINECONE_ENV")
-
- self.weaviate_host = os.getenv("WEAVIATE_HOST")
- self.weaviate_port = os.getenv("WEAVIATE_PORT")
- self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http")
- self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None)
- self.weaviate_password = os.getenv("WEAVIATE_PASSWORD", None)
- self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
- self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
- self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
- self.use_weaviate_embedded = (
- os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
- )
-
- # milvus configuration, e.g., localhost:19530.
- self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
- self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt")
-
- self.image_provider = os.getenv("IMAGE_PROVIDER")
- self.image_size = int(os.getenv("IMAGE_SIZE", 256))
- self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
- self.huggingface_image_model = os.getenv(
- "HUGGINGFACE_IMAGE_MODEL", "CompVis/stable-diffusion-v1-4"
- )
- self.huggingface_audio_to_text_model = os.getenv(
- "HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
- )
- self.sd_webui_url = os.getenv("SD_WEBUI_URL", "http://localhost:7860")
- self.sd_webui_auth = os.getenv("SD_WEBUI_AUTH")
-
- # Selenium browser settings
- self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
- self.selenium_headless = os.getenv("HEADLESS_BROWSER", "True") == "True"
-
- # User agent header to use when making HTTP requests
- # Some websites might just completely deny request with an error code if
- # no user agent was found.
- self.user_agent = os.getenv(
- "USER_AGENT",
- "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36"
- " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
- )
-
- self.redis_host = os.getenv("REDIS_HOST", "localhost")
- self.redis_port = os.getenv("REDIS_PORT", "6379")
- self.redis_password = os.getenv("REDIS_PASSWORD", "")
- self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
- self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt")
- # Note that indexes must be created on db 0 in redis, this is not configurable.
-
- self.memory_backend = os.getenv("MEMORY_BACKEND", "local")
- # Initialize the OpenAI API client
- openai.api_key = self.openai_api_key
-
- def get_azure_deployment_id_for_model(self, model: str) -> str:
- """
- Returns the relevant deployment id for the model specified.
-
- Parameters:
- model(str): The model to map to the deployment id.
-
- Returns:
- The matching deployment id if found, otherwise an empty string.
- """
- if model == self.fast_llm_model:
- return self.azure_model_to_deployment_id_map[
- "fast_llm_model_deployment_id"
- ] # type: ignore
- elif model == self.smart_llm_model:
- return self.azure_model_to_deployment_id_map[
- "smart_llm_model_deployment_id"
- ] # type: ignore
- elif model == "text-embedding-ada-002":
- return self.azure_model_to_deployment_id_map[
- "embedding_model_deployment_id"
- ] # type: ignore
- else:
- return ""
-
- AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "..", "azure.yaml")
-
- def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
- """
- Loads the configuration parameters for Azure hosting from the specified file
- path as a yaml file.
-
- Parameters:
- config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"
-
- Returns:
- None
- """
- try:
- with open(config_file) as file:
- config_params = yaml.load(file, Loader=yaml.FullLoader)
- except FileNotFoundError:
- config_params = {}
- self.openai_api_type = config_params.get("azure_api_type") or "azure"
- self.openai_api_base = config_params.get("azure_api_base") or ""
- self.openai_api_version = (
- config_params.get("azure_api_version") or "2023-03-15-preview"
- )
- self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", {})
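- # Illustrative azure.yaml layout (deployment names are placeholders; the keys
- # match the lookups in this class):
- #
- #   azure_api_type: azure
- #   azure_api_base: https://<your-resource-name>.openai.azure.com
- #   azure_api_version: 2023-03-15-preview
- #   azure_model_map:
- #     fast_llm_model_deployment_id: <deployment name for the fast LLM>
- #     smart_llm_model_deployment_id: <deployment name for the smart LLM>
- #     embedding_model_deployment_id: <deployment name for text-embedding-ada-002>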
-
- def set_continuous_mode(self, value: bool) -> None:
- """Set the continuous mode value."""
- self.continuous_mode = value
-
- def set_continuous_limit(self, value: int) -> None:
- """Set the continuous limit value."""
- self.continuous_limit = value
-
- def set_speak_mode(self, value: bool) -> None:
- """Set the speak mode value."""
- self.speak_mode = value
-
- def set_fast_llm_model(self, value: str) -> None:
- """Set the fast LLM model value."""
- self.fast_llm_model = value
-
- def set_smart_llm_model(self, value: str) -> None:
- """Set the smart LLM model value."""
- self.smart_llm_model = value
-
- def set_fast_token_limit(self, value: int) -> None:
- """Set the fast token limit value."""
- self.fast_token_limit = value
-
- def set_smart_token_limit(self, value: int) -> None:
- """Set the smart token limit value."""
- self.smart_token_limit = value
-
- def set_browse_chunk_max_length(self, value: int) -> None:
- """Set the browse_website command chunk max length value."""
- self.browse_chunk_max_length = value
-
- def set_openai_api_key(self, value: str) -> None:
- """Set the OpenAI API key value."""
- self.openai_api_key = value
-
- def set_elevenlabs_api_key(self, value: str) -> None:
- """Set the ElevenLabs API key value."""
- self.elevenlabs_api_key = value
-
- def set_elevenlabs_voice_1_id(self, value: str) -> None:
- """Set the ElevenLabs Voice 1 ID value."""
- self.elevenlabs_voice_1_id = value
-
- def set_elevenlabs_voice_2_id(self, value: str) -> None:
- """Set the ElevenLabs Voice 2 ID value."""
- self.elevenlabs_voice_2_id = value
-
- def set_google_api_key(self, value: str) -> None:
- """Set the Google API key value."""
- self.google_api_key = value
-
- def set_custom_search_engine_id(self, value: str) -> None:
- """Set the custom search engine id value."""
- self.custom_search_engine_id = value
-
- def set_pinecone_api_key(self, value: str) -> None:
- """Set the Pinecone API key value."""
- self.pinecone_api_key = value
-
- def set_pinecone_region(self, value: str) -> None:
- """Set the Pinecone region value."""
- self.pinecone_region = value
-
- def set_debug_mode(self, value: bool) -> None:
- """Set the debug mode value."""
- self.debug_mode = value
-
-
-def check_openai_api_key() -> None:
- """Check if the OpenAI API key is set in config.py or as an environment variable."""
- cfg = Config()
- if not cfg.openai_api_key:
- print(
- Fore.RED
- + "Please set your OpenAI API key in .env or as an environment variable."
- )
- print("You can get your key from https://platform.openai.com/account/api-keys")
- exit(1)
diff --git a/spaces/DaleChen/AutoGPT/autogpt/speech/eleven_labs.py b/spaces/DaleChen/AutoGPT/autogpt/speech/eleven_labs.py
deleted file mode 100644
index ea84efd8ca9489b40919ecd571813fe954b078e3..0000000000000000000000000000000000000000
--- a/spaces/DaleChen/AutoGPT/autogpt/speech/eleven_labs.py
+++ /dev/null
@@ -1,86 +0,0 @@
-"""ElevenLabs speech module"""
-import os
-
-import requests
-from playsound import playsound
-
-from autogpt.config import Config
-from autogpt.speech.base import VoiceBase
-
-PLACEHOLDERS = {"your-voice-id"}
-
-
-class ElevenLabsSpeech(VoiceBase):
- """ElevenLabs speech class"""
-
- def _setup(self) -> None:
- """Set up the voices, API key, etc.
-
- Returns:
- None: None
- """
-
- cfg = Config()
- default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
- voice_options = {
- "Rachel": "21m00Tcm4TlvDq8ikWAM",
- "Domi": "AZnzlk1XvdvUeBnXmlld",
- "Bella": "EXAVITQu4vr4xnSDxMaL",
- "Antoni": "ErXwobaYiN019PkySvjV",
- "Elli": "MF3mGyEYCl7XYWbV9V6O",
- "Josh": "TxGEqnHWrfWFTfGW9XjX",
- "Arnold": "VR6AewLTigWG4xSOukaG",
- "Adam": "pNInz6obpgDQGcFmaJgB",
- "Sam": "yoZ06aMxZJJ28mfd3POQ",
- }
- self._headers = {
- "Content-Type": "application/json",
- "xi-api-key": cfg.elevenlabs_api_key,
- }
- self._voices = default_voices.copy()
- if cfg.elevenlabs_voice_1_id in voice_options:
- cfg.elevenlabs_voice_1_id = voice_options[cfg.elevenlabs_voice_1_id]
- if cfg.elevenlabs_voice_2_id in voice_options:
- cfg.elevenlabs_voice_2_id = voice_options[cfg.elevenlabs_voice_2_id]
- self._use_custom_voice(cfg.elevenlabs_voice_1_id, 0)
- self._use_custom_voice(cfg.elevenlabs_voice_2_id, 1)
-
- def _use_custom_voice(self, voice, voice_index) -> None:
- """Use a custom voice if provided and not a placeholder
-
- Args:
- voice (str): The voice ID
- voice_index (int): The voice index
-
- Returns:
- None: None
- """
- # Placeholder values that should be treated as empty
- if voice and voice not in PLACEHOLDERS:
- self._voices[voice_index] = voice
-
- def _speech(self, text: str, voice_index: int = 0) -> bool:
- """Speak text using elevenlabs.io's API
-
- Args:
- text (str): The text to speak
- voice_index (int, optional): The voice to use. Defaults to 0.
-
- Returns:
- bool: True if the request was successful, False otherwise
- """
- tts_url = (
- f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}"
- )
- response = requests.post(tts_url, headers=self._headers, json={"text": text})
-
- if response.status_code == 200:
- with open("speech.mpeg", "wb") as f:
- f.write(response.content)
- playsound("speech.mpeg", True)
- os.remove("speech.mpeg")
- return True
- else:
- print("Request failed with status code:", response.status_code)
- print("Response content:", response.content)
- return False
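-
- # Example usage (a sketch; assumes a valid ElevenLabs API key is configured):
- #   speaker = ElevenLabsSpeech()
- #   speaker._speech("Hello there", voice_index=0)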
diff --git a/spaces/Detomo/ai-avatar-backend/helpers/tts.js b/spaces/Detomo/ai-avatar-backend/helpers/tts.js
deleted file mode 100644
index 8a4f54f7f6e880da4deb7b874f90c92e77c99a75..0000000000000000000000000000000000000000
--- a/spaces/Detomo/ai-avatar-backend/helpers/tts.js
+++ /dev/null
@@ -1,92 +0,0 @@
-// azure-cognitiveservices-speech.js
-require('dotenv').config()
-const sdk = require('microsoft-cognitiveservices-speech-sdk');
-const blendShapeNames = require('./blendshapeNames');
-const _ = require('lodash');
-const voicesMap = {
- 'en-US': 'en-US-AmberNeural',
- 'ja-JP': 'ja-JP-MayuNeural',
- 'vi-VN': 'vi-VN-NamMinhNeural',
-};
-
-// SSML template sent to the Azure speech service. __TEXT__ is replaced with the input
-// text, and the default voice name below is swapped for the per-language voice at
-// request time. The viseme element asks the service for facial blend-shape frames.
-let SSML = `<speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xmlns:mstts="https://www.w3.org/2001/mstts" xml:lang="en-US">
-<voice name="en-US-AmberNeural">
-<mstts:viseme type="FacialExpression"/>
-__TEXT__
-</voice>
-</speak>`;
-
-const key = process.env.AZURE_KEY;
-const region = process.env.AZURE_REGION;
-
-/**
- * Convert text to speech with the Azure Speech SDK and collect viseme blend-shape data.
- * @param {string} text text to convert to audio/speech
- * @param {string} language language code used to pick a voice from `voicesMap`
- * @returns {Promise<{blendData: Array, filename: string}>} blend-shape frames plus the public path of the generated mp3
- */
-const textToSpeech = async (text, language)=> {
-
- // convert callback function to promise
- return new Promise((resolve, reject) => {
-
- const voice = voicesMap[language];
- let ssml = SSML.replace("__TEXT__", text).replace("en-US-AmberNeural", `${voice}`);
-
-
- const speechConfig = sdk.SpeechConfig.fromSubscription(key, region);
- speechConfig.speechSynthesisOutputFormat = 5; // mp3
-
- let audioConfig = null;
-
- // if (filename) {
- let randomString = Math.random().toString(36).slice(2, 7);
- let filename = `./public/speech-${randomString}.mp3`;
- audioConfig = sdk.AudioConfig.fromAudioFileOutput(filename);
- // }
-
- let blendData = [];
- let timeStep = 1/60;
- let timeStamp = 0;
-
- const synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig);
-
- // Subscribes to viseme received event
- synthesizer.visemeReceived = function (s, e) {
-
- // `Animation` is an xml string for SVG or a json string for blend shapes
- var animation = JSON.parse(e.animation);
-
- _.each(animation.BlendShapes, blendArray => {
-
- let blend = {};
- _.each(blendShapeNames, (shapeName, i) => {
- blend[shapeName] = blendArray[i];
- });
-
- blendData.push({
- time: timeStamp,
- blendshapes: blend
- });
- timeStamp += timeStep;
- });
-
- }
-
-
- synthesizer.speakSsmlAsync(
- ssml,
- result => {
-
- synthesizer.close();
- resolve({blendData, filename: `/speech-${randomString}.mp3`});
-
- },
- error => {
- synthesizer.close();
- reject(error);
- });
- });
-};
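-
-// Example usage (a sketch; called from an async route handler elsewhere in the app):
-//   const { blendData, filename } = await textToSpeech('Hello there', 'en-US');
-//   // `filename` is the public path of the generated mp3, `blendData` the per-frame blend shapes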
-
-module.exports = textToSpeech;
\ No newline at end of file
diff --git a/spaces/Detomo/detect_greeting_app/app.py b/spaces/Detomo/detect_greeting_app/app.py
deleted file mode 100644
index 5b7f4454da1f26ef157d3d2f66a04cd96ae07a6f..0000000000000000000000000000000000000000
--- a/spaces/Detomo/detect_greeting_app/app.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import gradio as gr
-import time
-from faster_whisper import WhisperModel
-from utils import ffmpeg_read, stt, greeting_list
-from sentence_transformers import SentenceTransformer, util
-import torch
-
-whisper_models = ["tiny", "base", "small", "medium", "large-v1", "large-v2"]
-audio_model = WhisperModel("base", compute_type="int8", device="cpu")
-text_model = SentenceTransformer('all-MiniLM-L6-v2')
-corpus_embeddings = torch.load('corpus_embeddings.pt')
-model_type = "whisper"
-title= "Greeting detection demo app"
-
-def speech_to_text(upload_audio):
- """
- Transcribe audio using whisper model.
- """
- # Transcribe audio
- if model_type == "whisper":
- transcribe_options = dict(task="transcribe", language="ja", beam_size=5, best_of=5, vad_filter=True)
- segments_raw, info = audio_model.transcribe(upload_audio, **transcribe_options)
- segments = [segment.text for segment in segments_raw]
- return ' '.join(segments)
- else:
- text = stt(upload_audio)
- return text
-
-def voice_detect(audio, recognize_text=""):
- """
- Transcribe a streamed audio chunk and update the running greeting count.
- The state string stores the count as its first character, followed by the
- text recognized so far.
- """
- # time.sleep(2)
- if len(recognize_text) != 0:
- count_state = int(recognize_text[0])
- recognize_text = recognize_text[1:]
- else:
- count_state = 0
-
- threshold = 0.8
- detect_greeting = 0
- text = speech_to_text(audio)
- # Drop a common Whisper hallucination on silent audio ("Thank you for watching").
- if "ご視聴ありがとうございました" in text:
- text = ""
- recognize_text = recognize_text + " " + text
- query_embedding = text_model.encode(text, convert_to_tensor=True)
- # Exact match against the known greeting phrases first...
- for greeting in greeting_list:
- if greeting in text:
- detect_greeting = 1
- break
- # ...then fall back to semantic similarity against the greeting corpus.
- if detect_greeting == 0:
- hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=1)[0]
- if hits[0]['score'] > threshold:
- detect_greeting = 1
-
- recognize_state = str(count_state + detect_greeting) + recognize_text
- return recognize_text, recognize_state, count_state
-
-def clear():
- return None, None, None
-
-demo = gr.Blocks(title=title)
-
-with demo:
- # Page heading: "挨拶カウンター" (greeting counter)
- gr.Markdown('''
- # 挨拶カウンター
- ''')
- with gr.Row():
- with gr.Column():
- audio_source = gr.Audio(source="microphone", type="filepath", streaming=True)
- state = gr.State(value="")
- with gr.Column():
- greeting_count = gr.Number(label="挨拶回数")
- with gr.Row():
- text_output = gr.Textbox(label="認識されたテキスト")
- audio_source.stream(voice_detect, inputs=[audio_source, state], outputs=[text_output, state, greeting_count])
-
-demo.launch(debug=True)
\ No newline at end of file
diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/training/__init__.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/training/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Egrt/GCycleGAN/nets/resnest/splat.py b/spaces/Egrt/GCycleGAN/nets/resnest/splat.py
deleted file mode 100644
index c3f21b19ac75534521b9a0eae957e8ee454f1cd4..0000000000000000000000000000000000000000
--- a/spaces/Egrt/GCycleGAN/nets/resnest/splat.py
+++ /dev/null
@@ -1,99 +0,0 @@
-"""Split-Attention"""
-
-import torch
-from torch import nn
-import torch.nn.functional as F
-from torch.nn import Conv2d, Module, Linear, BatchNorm2d, ReLU
-from torch.nn.modules.utils import _pair
-
-__all__ = ['SplAtConv2d']
-
-class SplAtConv2d(Module):
- """Split-Attention Conv2d
- """
- def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0),
- dilation=(1, 1), groups=1, bias=True,
- radix=2, reduction_factor=4,
- rectify=False, rectify_avg=False, norm_layer=None,
- dropblock_prob=0.0, **kwargs):
- super(SplAtConv2d, self).__init__()
- padding = _pair(padding)
- self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
- self.rectify_avg = rectify_avg
- inter_channels = max(in_channels*radix//reduction_factor, 32)
- self.radix = radix
- self.cardinality = groups
- self.channels = channels
- self.dropblock_prob = dropblock_prob
- if self.rectify:
- from rfconv import RFConv2d
- self.conv = RFConv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation,
- groups=groups*radix, bias=bias, average_mode=rectify_avg, **kwargs)
- else:
- self.conv = Conv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation,
- groups=groups*radix, bias=bias, **kwargs)
- self.use_bn = norm_layer is not None
- if self.use_bn:
- self.bn0 = norm_layer(channels*radix)
- self.relu = ReLU(inplace=True)
- self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
- if self.use_bn:
- self.bn1 = norm_layer(inter_channels)
- self.fc2 = Conv2d(inter_channels, channels*radix, 1, groups=self.cardinality)
- if dropblock_prob > 0.0:
- self.dropblock = DropBlock2D(dropblock_prob, 3)
- self.rsoftmax = rSoftMax(radix, groups)
-
- def forward(self, x):
- x = self.conv(x)
- if self.use_bn:
- x = self.bn0(x)
- if self.dropblock_prob > 0.0:
- x = self.dropblock(x)
- x = self.relu(x)
-
- batch, rchannel = x.shape[:2]
- if self.radix > 1:
- if torch.__version__ < '1.5':
- splited = torch.split(x, int(rchannel//self.radix), dim=1)
- else:
- splited = torch.split(x, rchannel//self.radix, dim=1)
- gap = sum(splited)
- else:
- gap = x
- gap = F.adaptive_avg_pool2d(gap, 1)
- gap = self.fc1(gap)
-
- if self.use_bn:
- gap = self.bn1(gap)
- gap = self.relu(gap)
-
- atten = self.fc2(gap)
- atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
-
- if self.radix > 1:
- if torch.__version__ < '1.5':
- attens = torch.split(atten, int(rchannel//self.radix), dim=1)
- else:
- attens = torch.split(atten, rchannel//self.radix, dim=1)
- out = sum([att*split for (att, split) in zip(attens, splited)])
- else:
- out = atten * x
- return out.contiguous()
-
-class rSoftMax(nn.Module):
- def __init__(self, radix, cardinality):
- super().__init__()
- self.radix = radix
- self.cardinality = cardinality
-
- def forward(self, x):
- batch = x.size(0)
- if self.radix > 1:
- x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
- x = F.softmax(x, dim=1)
- x = x.reshape(batch, -1)
- else:
- x = torch.sigmoid(x)
- return x
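-
-# Example usage (shapes are illustrative):
-#   layer = SplAtConv2d(64, 128, kernel_size=3, padding=1, radix=2, norm_layer=nn.BatchNorm2d)
-#   out = layer(torch.randn(2, 64, 32, 32))   # -> (2, 128, 32, 32)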
-
diff --git a/spaces/EuroSciPy2022/xgboost-income-prediction-with-explainability/app.py b/spaces/EuroSciPy2022/xgboost-income-prediction-with-explainability/app.py
deleted file mode 100644
index a1904c3bd9b857cf77c507e98e431d4bc50f4251..0000000000000000000000000000000000000000
--- a/spaces/EuroSciPy2022/xgboost-income-prediction-with-explainability/app.py
+++ /dev/null
@@ -1,176 +0,0 @@
-import random
-
-import gradio as gr
-import matplotlib
-import matplotlib.pyplot as plt
-import pandas as pd
-import shap
-import xgboost as xgb
-from datasets import load_dataset
-
-matplotlib.use("Agg")
-
-dataset = load_dataset("scikit-learn/adult-census-income")
-
-X_train = dataset["train"].to_pandas()
-_ = X_train.pop("fnlwgt")
-_ = X_train.pop("race")
-
-y_train = X_train.pop("income")
-y_train = (y_train == ">50K").astype(int)
-categorical_columns = [
- "workclass",
- "education",
- "marital.status",
- "occupation",
- "relationship",
- "sex",
- "native.country",
-]
-X_train = X_train.astype({col: "category" for col in categorical_columns})
-
-
-data = xgb.DMatrix(X_train, label=y_train, enable_categorical=True)
-model = xgb.train(params={"objective": "binary:logistic"}, dtrain=data)
-explainer = shap.TreeExplainer(model)
-
-
-def predict(*args):
- df = pd.DataFrame([args], columns=X_train.columns)
- df = df.astype({col: "category" for col in categorical_columns})
- pos_pred = model.predict(xgb.DMatrix(df, enable_categorical=True))
- return {">50K": float(pos_pred[0]), "<=50K": 1 - float(pos_pred[0])}
-
-
-def interpret(*args):
- df = pd.DataFrame([args], columns=X_train.columns)
- df = df.astype({col: "category" for col in categorical_columns})
- shap_values = explainer.shap_values(xgb.DMatrix(df, enable_categorical=True))
- scores_desc = list(zip(shap_values[0], X_train.columns))
- scores_desc = sorted(scores_desc)
- fig_m = plt.figure(tight_layout=True)
- plt.barh([s[1] for s in scores_desc], [s[0] for s in scores_desc])
- plt.title("Feature Shap Values")
- plt.ylabel("Shap Value")
- plt.xlabel("Feature")
- plt.tight_layout()
- return fig_m
-
-
-unique_class = sorted(X_train["workclass"].unique())
-unique_education = sorted(X_train["education"].unique())
-unique_marital_status = sorted(X_train["marital.status"].unique())
-unique_relationship = sorted(X_train["relationship"].unique())
-unique_occupation = sorted(X_train["occupation"].unique())
-unique_sex = sorted(X_train["sex"].unique())
-unique_country = sorted(X_train["native.country"].unique())
-
-with gr.Blocks() as demo:
- gr.Markdown("""
- ## Income Classification with XGBoost 💰
-
- This example shows how to load data from the Hugging Face Hub to train an XGBoost classifier and
- demo the predictions with Gradio.
-
- The source is [here](https://huggingface.co/spaces/gradio/xgboost-income-prediction-with-explainability).
- """)
- with gr.Row():
- with gr.Column():
- age = gr.Slider(label="Age", minimum=17, maximum=90, step=1, randomize=True)
- work_class = gr.Dropdown(
- label="Workclass",
- choices=unique_class,
- value=lambda: random.choice(unique_class),
- )
- education = gr.Dropdown(
- label="Education Level",
- choices=unique_education,
- value=lambda: random.choice(unique_education),
- )
- years = gr.Slider(
- label="Years of schooling",
- minimum=1,
- maximum=16,
- step=1,
- randomize=True,
- )
- marital_status = gr.Dropdown(
- label="Marital Status",
- choices=unique_marital_status,
- value=lambda: random.choice(unique_marital_status),
- )
- occupation = gr.Dropdown(
- label="Occupation",
- choices=unique_occupation,
- value=lambda: random.choice(unique_occupation),
- )
- relationship = gr.Dropdown(
- label="Relationship Status",
- choices=unique_relationship,
- value=lambda: random.choice(unique_relationship),
- )
- sex = gr.Dropdown(
- label="Sex", choices=unique_sex, value=lambda: random.choice(unique_sex)
- )
- capital_gain = gr.Slider(
- label="Capital Gain",
- minimum=0,
- maximum=100000,
- step=500,
- randomize=True,
- )
- capital_loss = gr.Slider(
- label="Capital Loss", minimum=0, maximum=10000, step=500, randomize=True
- )
- hours_per_week = gr.Slider(
- label="Hours Per Week Worked", minimum=1, maximum=99, step=1
- )
- country = gr.Dropdown(
- label="Native Country",
- choices=unique_country,
- value=lambda: random.choice(unique_country),
- )
- with gr.Column():
- label = gr.Label()
- plot = gr.Plot()
- with gr.Row():
- predict_btn = gr.Button(value="Predict")
- interpret_btn = gr.Button(value="Interpret")
- predict_btn.click(
- predict,
- inputs=[
- age,
- work_class,
- education,
- years,
- marital_status,
- occupation,
- relationship,
- sex,
- capital_gain,
- capital_loss,
- hours_per_week,
- country,
- ],
- outputs=[label],
- )
- interpret_btn.click(
- interpret,
- inputs=[
- age,
- work_class,
- education,
- years,
- marital_status,
- occupation,
- relationship,
- sex,
- capital_gain,
- capital_loss,
- hours_per_week,
- country,
- ],
- outputs=[plot],
- )
-
-demo.launch()
diff --git a/spaces/Flux9665/IMS-Toucan/README.md b/spaces/Flux9665/IMS-Toucan/README.md
deleted file mode 100644
index 80144846a8e59a50d094b1c404342cc9c3c7e821..0000000000000000000000000000000000000000
--- a/spaces/Flux9665/IMS-Toucan/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Multilingual TTS
-emoji: 🌍🦜
-colorFrom: green
-colorTo: yellow
-sdk: gradio
-sdk_version: 2.7.5.2
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/README.md b/spaces/Freiburg-AI-Research/dermoscopic_image_generation/README.md
deleted file mode 100644
index 00ac8ed128577ad7ef30cb839ac40bf7cf5c49ea..0000000000000000000000000000000000000000
--- a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-title: Glide Text2im
-emoji: 📊
-colorFrom: purple
-colorTo: gray
-sdk: gradio
-app_file: app.py
-pinned: false
-duplicated_from: valhalla/glide-text2im
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/clip/encoders.py b/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/clip/encoders.py
deleted file mode 100644
index ee72773c2c891d2dda6d02933e88599b5330b052..0000000000000000000000000000000000000000
--- a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/clip/encoders.py
+++ /dev/null
@@ -1,497 +0,0 @@
-import math
-from collections import OrderedDict
-from typing import List, Optional, Tuple, cast
-
-import attr
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .attention import (
- AttentionInfo,
- DenseAttentionMask,
- DenseCausalAttentionMask,
- make_full_layout,
- to_attention_info,
-)
-from .utils import Affine, LayerNorm, zero_key_bias_grad
-
-# Constants used in the original CLIP implementation.
-image_channel_means = [122.77093945, 116.74601272, 104.09373519]
-image_channel_stds = [68.50053285, 66.63215831, 70.32316309]
-
-
-@attr.s(eq=False, repr=False)
-class TextEmbedding(nn.Module):
- n_vocab: int = attr.ib()
- n_context: int = attr.ib()
- n_state: int = attr.ib()
- device: torch.device = attr.ib(default=torch.device("cuda"))
-
- def __attrs_post_init__(self) -> None:
- super().__init__()
-
- w_voc = torch.empty((self.n_vocab, self.n_state), dtype=torch.float32, device=self.device)
- w_pos = torch.empty((self.n_context, self.n_state), dtype=torch.float32, device=self.device)
-
- with torch.no_grad():
- w_voc.normal_(std=0.02)
- w_pos.normal_(std=0.01)
-
- self.w_voc = nn.Parameter(w_voc)
- self.w_pos = nn.Parameter(w_pos)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- if len(x.shape) != 2:
- raise ValueError()
-
- return F.embedding(x, self.w_voc) + self.w_pos[None, :, :]
-
-
-@attr.s(eq=False, repr=False)
-class ImageEmbedding(nn.Module):
- image_size: int = attr.ib()
- patch_size: int = attr.ib()
- n_state: int = attr.ib()
- n_timestep: int = attr.ib(default=0)
- device: torch.device = attr.ib(default=torch.device("cuda"))
-
- def __attrs_post_init__(self) -> None:
- super().__init__()
-
- if self.image_size % self.patch_size != 0:
- raise ValueError()
-
- n_patch = self.image_size // self.patch_size
- patch_proj = torch.empty(
- (self.n_state, 3) + 2 * (self.patch_size,), dtype=torch.float32, device=self.device
- )
- w_pos = torch.empty(
- (1 + n_patch ** 2, self.n_state), dtype=torch.float32, device=self.device
- )
-
- with torch.no_grad():
- if self.n_timestep == 0:
- pred_state = torch.empty((self.n_state,), dtype=torch.float32, device=self.device)
- pred_state.normal_(std=1 / np.sqrt(self.n_state))
- self.pred_state = nn.Parameter(pred_state)
- else:
- w_t = torch.empty(
- (self.n_timestep, self.n_state), dtype=torch.float32, device=self.device
- )
- w_t.normal_(std=1 / np.sqrt(self.n_state))
- self.w_t = nn.Parameter(w_t)
-
- patch_proj.normal_(std=np.sqrt(2 / (self.n_state * self.patch_size ** 2)))
- w_pos.normal_(std=1 / np.sqrt(self.n_state))
-
- self.patch_proj = nn.Parameter(patch_proj)
- self.w_pos = nn.Parameter(w_pos)
-
- self.channel_means = torch.tensor(
- image_channel_means, dtype=torch.float32, device=self.device
- )[None, :, None, None]
- self.channel_stds = torch.tensor(
- image_channel_stds, dtype=torch.float32, device=self.device
- )[None, :, None, None]
- self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device)
-
- def forward(self, x: torch.Tensor, t: Optional[torch.Tensor] = None) -> torch.Tensor:
- if len(x.shape) != 4:
- raise ValueError("input should be 4d")
- if x.shape[1] != 3:
- raise ValueError("input should have 3 channels")
- if not (x.shape[2] == self.image_size and x.shape[3] == self.image_size):
- raise ValueError(f"input is not {self.image_size} x {self.image_size}")
-
- if (self.n_timestep == 0 and t is not None) or (self.n_timestep != 0 and t is None):
- raise ValueError()
- if self.n_timestep != 0:
- assert t is not None
- if len(t.shape) != 1:
- raise ValueError()
- if t.shape[0] != x.shape[0]:
- raise ValueError()
-
- x = (x - self.channel_means) / self.channel_stds
- x = F.conv2d(x, self.patch_proj, stride=self.patch_size)
- x = x.reshape(x.shape[0], self.n_state, (self.image_size // self.patch_size) ** 2).permute(
- 0, 2, 1
- )
-
- sot = (
- self.pred_state[None, None].expand(x.shape[0], -1, -1)
- if self.n_timestep == 0
- else F.embedding(cast(torch.Tensor, t), self.w_t)[:, None]
- )
- x = torch.cat((sot, x), dim=1) + self.w_pos[None]
- return self.ln(x)
-
-
-@attr.s(eq=False, repr=False)
-class AttentionResblock(nn.Module):
- n_state: int = attr.ib()
- n_resblocks: int = attr.ib()
- attn_fn: AttentionInfo = attr.ib()
- device: torch.device = attr.ib(default=torch.device("cuda"))
-
- def __attrs_post_init__(self) -> None:
- super().__init__()
-
- self.n_head_state = self.n_state // self.attn_fn.n_heads
- self.qk_scale = 1 / np.sqrt(self.n_head_state)
-
- self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device)
- self.f_q = Affine(
- self.n_state,
- self.n_state,
- std=1 / math.sqrt(self.n_state),
- use_bias=True,
- bias_filter_fn=zero_key_bias_grad,
- device=self.device,
- )
- self.f_k = Affine(
- self.n_state,
- self.n_state,
- std=1 / math.sqrt(self.n_state),
- use_bias=False,
- bias_filter_fn=zero_key_bias_grad,
- device=self.device,
- )
- self.f_v = Affine(
- self.n_state,
- self.n_state,
- std=1 / math.sqrt(self.n_state),
- use_bias=True,
- bias_filter_fn=zero_key_bias_grad,
- device=self.device,
- )
- self.f_c = Affine(
- self.n_state,
- self.n_state,
- use_bias=True,
- std=1 / np.sqrt(self.n_state * self.n_resblocks ** 2),
- device=self.device,
- ) # XXX
-
- def forward(self, m: torch.Tensor) -> torch.Tensor:
- n_context = m.shape[1]
- n_query_pad = self.attn_fn.ctx_blks_q * self.attn_fn.block_size - n_context
- n_key_pad = self.attn_fn.ctx_blks_k * self.attn_fn.block_size - n_context
- assert n_query_pad >= 0
- assert n_key_pad >= 0
-
- r = m
- r = self.ln(r)
- q, k, v = self.f_q(r), self.f_k(r), self.f_v(r)
-
- if n_query_pad != 0:
- q = F.pad(q, (0, 0, 0, n_query_pad))
-
- if n_key_pad != 0:
- k = F.pad(k, (0, 0, 0, n_key_pad))
- v = F.pad(v, (0, 0, 0, n_key_pad))
-
- q = q.view([q.shape[0], -1, self.attn_fn.n_heads, self.n_head_state]).permute((0, 2, 1, 3))
- k = k.view([k.shape[0], -1, self.attn_fn.n_heads, self.n_head_state]).permute((0, 2, 1, 3))
- v = v.view([v.shape[0], -1, self.attn_fn.n_heads, self.n_head_state]).permute((0, 2, 1, 3))
- w = torch.einsum(
- "bhcd,bhkd->bhck", q * math.sqrt(self.qk_scale), k * math.sqrt(self.qk_scale)
- )
-
- if hasattr(self.attn_fn, "pytorch_attn_bias"):
- bias = self.attn_fn.pytorch_attn_bias
- assert len(bias.shape) in {2, 3}
-
- if len(bias.shape) == 2:
- w = torch.softmax(w + self.attn_fn.pytorch_attn_bias[None, None], dim=-1)
- elif len(bias.shape) == 3:
- w = torch.softmax(w + self.attn_fn.pytorch_attn_bias[None], dim=-1)
- else:
- w = torch.softmax(w, dim=-1)
-
- r = torch.einsum("bhck,bhkd->bhcd", w, v)
- r = r.permute((0, 2, 1, 3)).reshape((r.shape[0], -1, self.n_state))
-
- if n_query_pad != 0:
- r = r[:, :-n_query_pad]
-
- assert r.shape[1] == n_context
-
- r = self.f_c(r)
- return m + r
-
-
-@attr.s(eq=False, repr=False)
-class FullyConnectedResblock(nn.Module):
- """
- Not imported from other files because we retain Alec's original inits.
- """
-
- n_state: int = attr.ib()
- n_resblocks: int = attr.ib()
- device: torch.device = attr.ib(default=torch.device("cuda"))
-
- def __attrs_post_init__(self) -> None:
- super().__init__()
-
- self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device)
- self.f_1 = Affine(
- self.n_state,
- 4 * self.n_state,
- use_bias=True,
- std=np.sqrt(2 / (4 * self.n_state)),
- device=self.device,
- )
- self.f_2 = Affine(
- 4 * self.n_state,
- self.n_state,
- use_bias=True,
- std=1 / np.sqrt(self.n_state * self.n_resblocks ** 2),
- device=self.device,
- ) # XXX
-
- def forward(self, m: torch.Tensor) -> torch.Tensor:
- r = m
- r = self.ln(r)
-
- r = self.f_2(F.gelu(self.f_1(r)))
- return m + r
-
-
-@attr.s(eq=False, repr=False)
-class TransformerBlock(nn.Module):
- n_state: int = attr.ib()
- n_resblocks: int = attr.ib()
- attn_fn: AttentionInfo = attr.ib()
- device: torch.device = attr.ib(default=torch.device("cuda"))
-
- def __attrs_post_init__(self) -> None:
- super().__init__()
-
- self.f_attn = AttentionResblock(
- self.n_state,
- self.n_resblocks,
- self.attn_fn,
- self.device,
- )
- self.f_mlp = FullyConnectedResblock(self.n_state, self.n_resblocks, self.device)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- return self.f_mlp(self.f_attn(x))
-
-
-@attr.s(eq=False, repr=False)
-class TextFeatureExtractor(nn.Module):
- n_state: int = attr.ib()
- n_embd: int = attr.ib()
- device: torch.device = attr.ib(default=torch.device("cuda"))
-
- def __attrs_post_init__(self) -> None:
- super().__init__()
-
- self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device)
- self.f = Affine(self.n_state, self.n_embd, use_bias=False, device=self.device)
-
- def forward(
- self, text: torch.Tensor, text_len: torch.Tensor, return_probe_features: bool = False
- ) -> torch.Tensor:
- if len(text.shape) != 3:
- raise ValueError("expected text to be 3d")
- if len(text_len.shape) != 1:
- raise ValueError("expected text length to be 1d")
- if text.shape[0] != text_len.shape[0]:
- raise ValueError("text and text_len have inconsistent batch dimensions")
-
- index = (text_len - 1)[:, None, None].expand(-1, 1, text.shape[2])
- x = torch.gather(text, dim=1, index=index)
- assert list(x.shape) == [text.shape[0], 1, text.shape[2]]
-
- if return_probe_features:
- return x[:, 0]
-
- x = self.ln(x)
- return self.f(x[:, 0])
-
-
-@attr.s(eq=False, repr=False)
-class ImageFeatureExtractor(nn.Module):
- n_state: int = attr.ib()
- n_embd: int = attr.ib()
- device: torch.device = attr.ib(default=torch.device("cuda"))
-
- def __attrs_post_init__(self) -> None:
- super().__init__()
-
- self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device)
- self.f = Affine(self.n_state, self.n_embd, use_bias=False, device=self.device)
-
- def forward(self, x: torch.Tensor, return_probe_features: bool = False) -> torch.Tensor:
- if return_probe_features:
- return x[:, 0]
-
- x = self.ln(x[:, :1])
- return self.f(x[:, 0])
-
-
-@attr.s(eq=False, repr=False)
-class TextEncoder(nn.Module):
- n_bpe_vocab: int = attr.ib()
- max_text_len: int = attr.ib()
- n_embd: int = attr.ib()
- n_head: int = attr.ib()
- n_xf_blocks: int = attr.ib()
- n_head_state: int = attr.ib(default=64)
- device: torch.device = attr.ib(default=torch.device("cuda"))
- block_size: int = attr.ib(init=False, default=32)
-
- def __attrs_post_init__(self) -> None:
- super().__init__()
-
- self.n_state = self.n_head * self.n_head_state
- n_rounded_context = self.block_size * int(math.ceil(self.max_text_len / self.block_size))
- n_pad = n_rounded_context - self.max_text_len
-
- args = (
- n_rounded_context,
- n_rounded_context,
- self.block_size,
- self.n_head,
- False,
- n_pad,
- n_pad,
- )
- mask = DenseCausalAttentionMask(*args)
- attn_fn = to_attention_info(mask)
-
- m = 1 - make_full_layout(mask).astype(np.float32)
- m[m == 1] = -1e10
- attn_fn.pytorch_attn_bias = torch.from_numpy(m).to(self.device)
-
- blocks: List[Tuple[str, nn.Module]] = [
- (
- "input",
- TextEmbedding(
- self.n_bpe_vocab, self.max_text_len, self.n_state, device=self.device
- ),
- )
- ]
-
- for i in range(self.n_xf_blocks):
- blocks.append(
- (
- f"block_{i}",
- TransformerBlock(self.n_state, 2 * self.n_xf_blocks, attn_fn, self.device),
- )
- )
-
- blocks.append(
- ("output", TextFeatureExtractor(self.n_state, self.n_embd, device=self.device))
- )
-
- self.blocks = nn.ModuleDict(OrderedDict(blocks))
-
- def forward(
- self,
- text: torch.Tensor,
- text_len: torch.Tensor,
- return_probe_features: bool = False,
- ) -> torch.Tensor:
-
- n_batch = text.shape[0]
- h = self.blocks["input"](text)
-
- for i in range(self.n_xf_blocks):
- h = self.blocks[f"block_{i}"](h)
-
- h = self.blocks["output"](h, text_len, return_probe_features=return_probe_features)
-
- assert list(h.shape) == [
- n_batch,
- self.n_embd if not return_probe_features else self.n_state,
- ]
- return h
-
-
-@attr.s(eq=False, repr=False)
-class ImageEncoder(nn.Module):
- image_size: int = attr.ib()
- patch_size: int = attr.ib()
- n_embd: int = attr.ib()
- n_head: int = attr.ib()
- n_xf_blocks: int = attr.ib()
- n_head_state: int = attr.ib(default=64)
- n_timestep: int = attr.ib(default=0)
- device: torch.device = attr.ib(default=torch.device("cuda"))
- block_size: int = attr.ib(init=False, default=32)
-
- def __attrs_post_init__(self) -> None:
- super().__init__()
-
- self.n_state = self.n_head * self.n_head_state
- self.n_context = 1 + (self.image_size // self.patch_size) ** 2
- n_rounded_context = self.block_size * int(math.ceil(self.n_context / self.block_size))
- n_pad = n_rounded_context - self.n_context
-
- args = (
- n_rounded_context,
- n_rounded_context,
- self.block_size,
- self.n_head,
- False,
- n_pad,
- n_pad,
- )
- mask = DenseAttentionMask(*args)
- attn_fn = to_attention_info(mask)
-
- m = 1 - make_full_layout(mask).astype(np.float32)
- m[m == 1] = -1e10
- attn_fn.pytorch_attn_bias = torch.from_numpy(m).to(self.device)
-
- blocks: List[Tuple[str, nn.Module]] = [
- (
- "input",
- ImageEmbedding(
- self.image_size,
- self.patch_size,
- self.n_state,
- n_timestep=self.n_timestep,
- device=self.device,
- ),
- )
- ]
-
- for i in range(self.n_xf_blocks):
- blocks.append(
- (
- f"block_{i}",
- TransformerBlock(self.n_state, 2 * self.n_xf_blocks, attn_fn, self.device),
- )
- )
-
- blocks.append(("output", ImageFeatureExtractor(self.n_state, self.n_embd, self.device)))
-
- self.blocks = nn.ModuleDict(OrderedDict(blocks))
-
- def forward(
- self,
- image: torch.Tensor,
- timesteps: Optional[torch.Tensor] = None,
- return_probe_features: bool = False,
- ) -> torch.Tensor:
- n_batch = image.shape[0]
- h = self.blocks["input"](image, t=timesteps)
-
- for i in range(self.n_xf_blocks):
- h = self.blocks[f"block_{i}"](h)
-
- h = self.blocks["output"](h, return_probe_features=return_probe_features)
-
- assert list(h.shape) == [
- n_batch,
- self.n_embd if not return_probe_features else self.n_state,
- ]
-
- return h
diff --git a/spaces/GIZ/SDSN-demo/README.md b/spaces/GIZ/SDSN-demo/README.md
deleted file mode 100644
index 802ed4ec16f79564907c3f8150a0ab4f9cc1de35..0000000000000000000000000000000000000000
--- a/spaces/GIZ/SDSN-demo/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: SDSN Demo
-emoji: 📈
-colorFrom: purple
-colorTo: blue
-sdk: streamlit
-sdk_version: 1.10.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/GaenKoki/voicevox/voicevox_engine/utility/mutex_utility.py b/spaces/GaenKoki/voicevox/voicevox_engine/utility/mutex_utility.py
deleted file mode 100644
index 09d8cb9680f71758018bffe82838a763ca46fe31..0000000000000000000000000000000000000000
--- a/spaces/GaenKoki/voicevox/voicevox_engine/utility/mutex_utility.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import threading
-
-
-def mutex_wrapper(lock: threading.Lock):
- def wrap(f):
- def func(*args, **kw):
- lock.acquire()
- try:
- return f(*args, **kw)
- finally:
- lock.release()
-
- return func
-
- return wrap
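-
-
-# Example usage:
-#   _lock = threading.Lock()
-#
-#   @mutex_wrapper(_lock)
-#   def synthesize():
-#       ...  # only one thread executes this body at a time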
diff --git a/spaces/Gen-Sim/Gen-Sim/notebooks/dataset_test.py b/spaces/Gen-Sim/Gen-Sim/notebooks/dataset_test.py
deleted file mode 100644
index 191963b2a50622700a123b6b945b88ade1ac1ab2..0000000000000000000000000000000000000000
--- a/spaces/Gen-Sim/Gen-Sim/notebooks/dataset_test.py
+++ /dev/null
@@ -1,166 +0,0 @@
-import os
-import sys
-import numpy as np
-import hydra
-
-from cliport.dataset import RavensDataset
-from cliport.utils import utils
-from cliport import tasks
-from cliport.environments.environment import Environment
-
-import torch
-
-
-import matplotlib
-import matplotlib.pyplot as plt
-
-
-
-mode = 'train'
-augment = True
-
-### Uncomment the task you want to generate ###
-# task = 'align-rope'
-# task = 'assembling-kits-seq-seen-colors'
-# task = 'assembling-kits-seq-unseen-colors'
-# task = 'assembling-kits-seq-full'
-# task = 'packing-shapes'
-# task = 'packing-boxes-pairs-seen-colors'
-# task = 'packing-boxes-pairs-unseen-colors'
-# task = 'packing-boxes-pairs-full'
-# task = 'packing-seen-google-objects-seq'
-# task = 'packing-unseen-google-objects-seq'
-# task = 'packing-seen-google-objects-group'
-# task = 'packing-unseen-google-objects-group'
-# task = 'put-block-in-bowl-seen-colors'
-# task = 'put-block-in-bowl-unseen-colors'
-# task = 'put-block-in-bowl-full'
-task = 'align-box-corner'
-# task = 'stack-block-pyramid-seq-unseen-colors'
-# task = 'stack-block-pyramid-seq-full'
-# task = 'separating-piles-seen-colors'
-# task = 'separating-piles-unseen-colors'
-# task = 'separating-piles-full'
-# task = 'towers-of-hanoi-seq-seen-colors'
-# task = 'towers-of-hanoi-seq-unseen-colors'
-# task = 'towers-of-hanoi-seq-full'
-
-### visualization settings
-max_episodes = 1
-max_steps = 100
-
-
-
-root_dir = os.environ['CLIPORT_ROOT']
-config_file = 'train.yaml'
-cfg = utils.load_hydra_config(os.path.join(root_dir, f'cliport/cfg/{config_file}'))
-
-# Override defaults
-cfg['task'] = task
-cfg['mode'] = mode
-cfg['train']['data_augmentation'] = True
-data_dir = os.path.join(root_dir, 'data')
-
-
-
-task = tasks.names[cfg['task']]()
-task.mode = mode
-
-ds = RavensDataset(os.path.join(data_dir, f'{cfg["task"]}-{cfg["mode"]}'), cfg, n_demos=10, augment=augment)
-
-
-
-color_sums = []
-depth_sums = []
-
-total_images = 0
-
-for i in range(0, min(max_episodes, ds.n_episodes)):
- print(f'\n\nEpisode: {i + 1}/{ds.n_episodes}')
- episode, seed = ds.load(i)
-
- total_images += len(episode)-1
-
- total_reward = 0
- for step in range(min(max_steps, len(episode))):
- print(f"\nStep: {step+1}/{len(episode)}")
- obs, act, reward, info = episode[step]
-
- total_reward += reward
- batch = ds[i]
-
- num_images = len(obs['color'])
- fig, axs = plt.subplots(2, num_images+1, figsize=(15, 6))
- for n in range(num_images):
- axs[1, n].imshow(obs['color'][n])
- axs[1, n].set_title(f'Raw RGB {n+1}')
-
- axs[0, n].imshow(obs['depth'][n])
- axs[0, n].set_title(f'Raw Depth {n+1}')
-
- color_sums.append(np.mean(obs['color'][0], axis=(0,1)) / 255.0)
- depth_sums.append(np.mean(obs['depth'][0], axis=(0,1)))
-
- cam_config = None
- if b'camera_info' in info:
- cam_config = ds.get_cam_config(info[b'camera_info'])
-
- img_depth = ds.get_image(obs, cam_config=cam_config)
- img_tensor = torch.from_numpy(img_depth)
- img = np.uint8(img_tensor.detach().cpu().numpy())
- img = img.transpose(1,0,2)
-
- if step < len(episode)-1 and episode[step]:
- batch = ds.process_sample(episode[step], augment=augment)
- else:
- batch = ds.process_goal(episode[step], perturb_params=None)
-
- img_sample = batch['img']
- img_sample = torch.from_numpy(img_sample)
- color = np.uint8(img_sample.detach().cpu().numpy())[:,:,:3]
- color = color.transpose(1,0,2)
- depth = np.array(img_sample.detach().cpu().numpy())[:,:,3]
- depth = depth.transpose(1,0)
-
- axs[0, num_images].imshow(depth)
- axs[0, num_images].set_title('Depth')
-
- axs[1,num_images].imshow(color)
- axs[1,num_images].set_title('RGB + Oracle Pick & Place')
-
- if act and step < len(episode)-1:
- p0 = batch['p0']
- p1 = batch['p1']
- p0_theta = batch['p0_theta']
- p1_theta = batch['p1_theta'] + p0_theta
-
- pick = p0
- place = p1
-
- line_len = 30
- pick0 = (pick[0] + line_len/2.0 * np.sin(p0_theta), pick[1] + line_len/2.0 * np.cos(p0_theta))
- pick1 = (pick[0] - line_len/2.0 * np.sin(p0_theta), pick[1] - line_len/2.0 * np.cos(p0_theta))
- axs[1,num_images].plot((pick1[0], pick0[0]), (pick1[1], pick0[1]), color='r', linewidth=2)
-
- place0 = (place[0] + line_len/2.0 * np.sin(p1_theta), place[1] + line_len/2.0 * np.cos(p1_theta))
- place1 = (place[0] - line_len/2.0 * np.sin(p1_theta), place[1] - line_len/2.0 * np.cos(p1_theta))
- axs[1,num_images].plot((place1[0], place0[0]), (place1[1], place0[1]), color='g', linewidth=2)
-
- c_pick = plt.Circle(pick, 3, color='r', fill=False)
- c_place = plt.Circle(place, 3, color='g', fill=False)
-
- axs[1,num_images].add_patch(c_pick)
- axs[1,num_images].add_patch(c_place)
-
- plt.show()
-
- print(f"Language Goal: {batch['lang_goal']}")
- print(f"Step Reward: {reward}")
- print(f"Total Reward: {total_reward}")
-
- print(f"Done, Total Reward: {total_reward}")
-
-print("\n\nDataset Statistics: ")
-print(f"Color Mean: {np.mean(color_sums, axis=0)}, Std: {np.std(color_sums, axis=0)}")
-print(f"Depth Mean: {np.mean(depth_sums, axis=0)}, Std: {np.std(depth_sums, axis=0)}")
-print(f"Total Image-Action Pairs: {total_images}")
\ No newline at end of file
diff --git a/spaces/GitMylo/bark-voice-cloning/hubert/hubert_manager.py b/spaces/GitMylo/bark-voice-cloning/hubert/hubert_manager.py
deleted file mode 100644
index 857f2af29886fca6eb4df506853f446066af7c04..0000000000000000000000000000000000000000
--- a/spaces/GitMylo/bark-voice-cloning/hubert/hubert_manager.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import os.path
-import shutil
-import urllib.request
-
-import huggingface_hub
-
-
-class HuBERTManager:
- @staticmethod
- def make_sure_hubert_installed(download_url: str = 'https://dl.fbaipublicfiles.com/hubert/hubert_base_ls960.pt', file_name: str = 'hubert.pt'):
- install_dir = os.path.join('data', 'models', 'hubert')
- if not os.path.isdir(install_dir):
- os.makedirs(install_dir, exist_ok=True)
- install_file = os.path.join(install_dir, file_name)
- if not os.path.isfile(install_file):
- print('Downloading HuBERT base model')
- urllib.request.urlretrieve(download_url, install_file)
- print('Downloaded HuBERT')
- return install_file
-
-
- @staticmethod
- def make_sure_tokenizer_installed(model: str = 'quantifier_hubert_base_ls960_14.pth', repo: str = 'GitMylo/bark-voice-cloning', local_file: str = 'tokenizer.pth'):
- install_dir = os.path.join('data', 'models', 'hubert')
- if not os.path.isdir(install_dir):
- os.makedirs(install_dir, exist_ok=True)
- install_file = os.path.join(install_dir, local_file)
- if not os.path.isfile(install_file):
- print('Downloading HuBERT custom tokenizer')
- huggingface_hub.hf_hub_download(repo, model, local_dir=install_dir, local_dir_use_symlinks=False)
- shutil.move(os.path.join(install_dir, model), install_file)
- print('Downloaded tokenizer')
- return install_file
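-
-
-# Example usage (files are stored under data/models/hubert by default):
-#   hubert_path = HuBERTManager.make_sure_hubert_installed()
-#   tokenizer_path = HuBERTManager.make_sure_tokenizer_installed()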
diff --git a/spaces/Godrose0728/Aisound02/text/__init__.py b/spaces/Godrose0728/Aisound02/text/__init__.py
deleted file mode 100644
index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000
--- a/spaces/Godrose0728/Aisound02/text/__init__.py
+++ /dev/null
@@ -1,32 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-
-
-def text_to_sequence(text, symbols, cleaner_names):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
- symbols: list of symbols in the vocabulary; each symbol's index becomes its ID
- cleaner_names: names of the cleaner functions to run the text through
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
-
- sequence = []
-
- clean_text = _clean_text(text, cleaner_names)
- for symbol in clean_text:
- if symbol not in _symbol_to_id.keys():
- continue
- symbol_id = _symbol_to_id[symbol]
- sequence += [symbol_id]
- return sequence
-
-
-def _clean_text(text, cleaner_names):
- for name in cleaner_names:
- cleaner = getattr(cleaners, name)
- if not cleaner:
- raise Exception('Unknown cleaner: %s' % name)
- text = cleaner(text)
- return text
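-
-
-# Example usage (the symbol set and cleaner name below are illustrative, not the
-# ones shipped with this project):
-#   symbols = list("_abcdefghijklmnopqrstuvwxyz ")
-#   ids = text_to_sequence("hello world", symbols, ["basic_cleaners"])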
diff --git a/spaces/Gradio-Blocks/anime-colorization/README.md b/spaces/Gradio-Blocks/anime-colorization/README.md
deleted file mode 100644
index ca77164f07a41a87a016a5fb3d4a1afa7d1923e6..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/anime-colorization/README.md
+++ /dev/null
@@ -1,188 +0,0 @@
----
-title: Anime Colorization
-emoji: 😻
-colorFrom: indigo
-colorTo: pink
-sdk: gradio
-sdk_version: 3.0.5
-app_file: app.py
-pinned: false
-license: mit
----
-
-# Pixel Guide Diffusion For Anime Colorization
-
-
-
-Use a denoising diffusion probabilistic model for the anime colorization task.
-
-The v1 test results are in the branch [v1_result](https://github.com/HighCWu/pixel-guide-diffusion-for-anime-colorization/tree/v1_result).
-
-The dataset is not clean enough, and the guide sketches are generated with sketch2keras, so generalization is limited.
-
-In the future, I may use only anime portraits as target images and look for more diverse sketch models.
-
-# Introduction and Usage
-
-Pixel Guide Denoising Diffusion Probabilistic Models ( One Channel Guide Version )
-
-This repo is modified from [improved-diffusion](https://github.com/openai/improved-diffusion).
-
-Use [danbooru-sketch-pair-128x](https://www.kaggle.com/wuhecong/danbooru-sketch-pair-128x) as the dataset. You may need to rearrange the dataset folders first to form a guide-target pair dataset, as sketched below.
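-
-A minimal sketch of one way to build such guide-target pairs (the folder names below are assumptions about the downloaded dataset layout, not paths shipped with this repo):
-
-```python
-import os
-import shutil
-
-src_sketch, src_color = "danbooru-sketch-pair-128x/sketch", "danbooru-sketch-pair-128x/color"
-dst_guide, dst_target = "data/guide", "data/target"
-os.makedirs(dst_guide, exist_ok=True)
-os.makedirs(dst_target, exist_ok=True)
-
-# Copy every sketch that has a matching color image, keeping filenames aligned
-# so the data loader can treat them as guide/target pairs.
-for name in os.listdir(src_sketch):
-    color_path = os.path.join(src_color, name)
-    if os.path.isfile(color_path):
-        shutil.copy(os.path.join(src_sketch, name), os.path.join(dst_guide, name))
-        shutil.copy(color_path, os.path.join(dst_target, name))
-```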
-
-Modify `train_danbooru*.sh`, `test_danbooru*.sh` to meet your needs.
-
-The model is split into a 32px base part and a super-resolution part, which are cascaded at test time to produce the final result; the two parts are trained separately, without cascading.
-
-QQ Group: 1044867291
-
-Discord: https://discord.gg/YwWcAS47qb
-
-# Original README
-
-# improved-diffusion
-
-This is the codebase for [Improved Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2102.09672).
-
-# Usage
-
-This section of the README walks through how to train and sample from a model.
-
-## Installation
-
-Clone this repository and navigate to it in your terminal. Then run:
-
-```
-pip install -e .
-```
-
-This should install the ~~`improved_diffusion`~~ `pixel_guide_diffusion` python package that the scripts depend on.
-
-## Preparing Data
-
-The training code reads images from a directory of image files. In the [datasets](datasets) folder, we have provided instructions/scripts for preparing these directories for ImageNet, LSUN bedrooms, and CIFAR-10.
-
-For creating your own dataset, simply dump all of your images into a directory with ".jpg", ".jpeg", or ".png" extensions. If you wish to train a class-conditional model, name the files like "mylabel1_XXX.jpg", "mylabel2_YYY.jpg", etc., so that the data loader knows that "mylabel1" and "mylabel2" are the labels. Subdirectories will automatically be enumerated as well, so the images can be organized into a recursive structure (although the directory names will be ignored, and the underscore prefixes are used as names).
-
-The images will automatically be scaled and center-cropped by the data-loading pipeline. Simply pass `--data_dir path/to/images` to the training script, and it will take care of the rest.
-
-## Training
-
-To train your model, you should first decide some hyperparameters. We will split up our hyperparameters into three groups: model architecture, diffusion process, and training flags. Here are some reasonable defaults for a baseline:
-
-```
-MODEL_FLAGS="--image_size 64 --num_channels 128 --num_res_blocks 3"
-DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule linear"
-TRAIN_FLAGS="--lr 1e-4 --batch_size 128"
-```
-
-Here are some changes we experiment with, and how to set them in the flags:
-
- * **Learned sigmas:** add `--learn_sigma True` to `MODEL_FLAGS`
- * **Cosine schedule:** change `--noise_schedule linear` to `--noise_schedule cosine`
- * **Reweighted VLB:** add `--use_kl True` to `DIFFUSION_FLAGS` and add `--schedule_sampler loss-second-moment` to `TRAIN_FLAGS`.
- * **Class-conditional:** add `--class_cond True` to `MODEL_FLAGS`.
-
-Once you have setup your hyper-parameters, you can run an experiment like so:
-
-```
-python scripts/image_train.py --data_dir path/to/images $MODEL_FLAGS $DIFFUSION_FLAGS $TRAIN_FLAGS
-```
-
-You may also want to train in a distributed manner. In this case, run the same command with `mpiexec`:
-
-```
-mpiexec -n $NUM_GPUS python scripts/image_train.py --data_dir path/to/images $MODEL_FLAGS $DIFFUSION_FLAGS $TRAIN_FLAGS
-```
-
-When training in a distributed manner, you must manually divide the `--batch_size` argument by the number of ranks. In lieu of distributed training, you may use `--microbatch 16` (or `--microbatch 1` in extreme memory-limited cases) to reduce memory usage.
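-
-For example, with the baseline `TRAIN_FLAGS` above and a hypothetical 8-GPU run, keep the effective batch size at 128 by giving each rank 128 / 8 = 16:
-
-```
-TRAIN_FLAGS="--lr 1e-4 --batch_size 16"  # 16 per rank x 8 ranks = 128 in total
-mpiexec -n 8 python scripts/image_train.py --data_dir path/to/images $MODEL_FLAGS $DIFFUSION_FLAGS $TRAIN_FLAGS
-```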
-
-The logs and saved models will be written to a logging directory determined by the `OPENAI_LOGDIR` environment variable. If it is not set, then a temporary directory will be created in `/tmp`.
-
-## Sampling
-
-The above training script saves checkpoints to `.pt` files in the logging directory. These checkpoints will have names like `ema_0.9999_200000.pt` and `model200000.pt`. You will likely want to sample from the EMA models, since those produce much better samples.
-
-Once you have a path to your model, you can generate a large batch of samples like so:
-
-```
-python scripts/image_sample.py --model_path /path/to/model.pt $MODEL_FLAGS $DIFFUSION_FLAGS
-```
-
-Again, this will save results to a logging directory. Samples are saved as a large `npz` file, where `arr_0` in the file is a large batch of samples.
-
-Just like for training, you can run `image_sample.py` through MPI to use multiple GPUs and machines.
-
-You can change the number of sampling steps using the `--timestep_respacing` argument. For example, `--timestep_respacing 250` uses 250 steps to sample. Passing `--timestep_respacing ddim250` is similar, but uses the uniform stride from the [DDIM paper](https://arxiv.org/abs/2010.02502) rather than our stride.
-
-To sample using [DDIM](https://arxiv.org/abs/2010.02502), pass `--use_ddim True`.
-
-## Models and Hyperparameters
-
-This section includes model checkpoints and run flags for the main models in the paper.
-
-Note that the batch sizes are specified for single-GPU training, even though most of these runs will not naturally fit on a single GPU. To address this, either set `--microbatch` to a small value (e.g. 4) to train on one GPU, or run with MPI and divide `--batch_size` by the number of GPUs.
-
-Unconditional ImageNet-64 with our `L_hybrid` objective and cosine noise schedule [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/imagenet64_uncond_100M_1500K.pt)]:
-
-```bash
-MODEL_FLAGS="--image_size 64 --num_channels 128 --num_res_blocks 3 --learn_sigma True"
-DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine"
-TRAIN_FLAGS="--lr 1e-4 --batch_size 128"
-```
-
-Unconditional CIFAR-10 with our `L_hybrid` objective and cosine noise schedule [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/cifar10_uncond_50M_500K.pt)]:
-
-```bash
-MODEL_FLAGS="--image_size 32 --num_channels 128 --num_res_blocks 3 --learn_sigma True --dropout 0.3"
-DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine"
-TRAIN_FLAGS="--lr 1e-4 --batch_size 128"
-```
-
-Class-conditional ImageNet-64 model (270M parameters, trained for 250K iterations) [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/imagenet64_cond_270M_250K.pt)]:
-
-```bash
-MODEL_FLAGS="--image_size 64 --num_channels 192 --num_res_blocks 3 --learn_sigma True --class_cond True"
-DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine --rescale_learned_sigmas False --rescale_timesteps False"
-TRAIN_FLAGS="--lr 3e-4 --batch_size 2048"
-```
-
-Upsampling 256x256 model (280M parameters, trained for 500K iterations) [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/upsample_cond_500K.pt)]:
-
-```bash
-MODEL_FLAGS="--num_channels 192 --num_res_blocks 2 --learn_sigma True --class_cond True"
-DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule linear --rescale_learned_sigmas False --rescale_timesteps False"
-TRAIN_FLAGS="--lr 3e-4 --batch_size 256"
-```
-
-LSUN bedroom model (lr=1e-4) [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/lsun_uncond_100M_1200K_bs128.pt)]:
-
-```bash
-MODEL_FLAGS="--image_size 256 --num_channels 128 --num_res_blocks 2 --num_heads 1 --learn_sigma True --use_scale_shift_norm False --attention_resolutions 16"
-DIFFUSION_FLAGS="--diffusion_steps 1000 --noise_schedule linear --rescale_learned_sigmas False --rescale_timesteps False"
-TRAIN_FLAGS="--lr 1e-4 --batch_size 128"
-```
-
-LSUN bedroom model (lr=2e-5) [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/lsun_uncond_100M_2400K_bs64.pt)]:
-
-```bash
-MODEL_FLAGS="--image_size 256 --num_channels 128 --num_res_blocks 2 --num_heads 1 --learn_sigma True --use_scale_shift_norm False --attention_resolutions 16"
-DIFFUSION_FLAGS="--diffusion_steps 1000 --noise_schedule linear --rescale_learned_sigmas False --rescale_timesteps False --use_scale_shift_norm False"
-TRAIN_FLAGS="--lr 2e-5 --batch_size 128"
-```
-
-Unconditional ImageNet-64 with the `L_vlb` objective and cosine noise schedule [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/imagenet64_uncond_vlb_100M_1500K.pt)]:
-
-```bash
-MODEL_FLAGS="--image_size 64 --num_channels 128 --num_res_blocks 3 --learn_sigma True"
-DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine"
-TRAIN_FLAGS="--lr 1e-4 --batch_size 128 --schedule_sampler loss-second-moment"
-```
-
-Unconditional CIFAR-10 with the `L_vlb` objective and cosine noise schedule [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/cifar10_uncond_vlb_50M_500K.pt)]:
-
-```bash
-MODEL_FLAGS="--image_size 32 --num_channels 128 --num_res_blocks 3 --learn_sigma True --dropout 0.3"
-DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine"
-TRAIN_FLAGS="--lr 1e-4 --batch_size 128 --schedule_sampler loss-second-moment"
-```
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py
deleted file mode 100644
index 169278e5738b0abd4ae5e99594e4adbaaefa2d96..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py'
-# learning policy
-lr_config = dict(step=[28, 34])
-runner = dict(type='EpochBasedRunner', max_epochs=36)
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/samplers/pseudo_sampler.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/samplers/pseudo_sampler.py
deleted file mode 100644
index 2bd81abcdc62debc14772659d7a171f20bf33364..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/samplers/pseudo_sampler.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import torch
-
-from ..builder import BBOX_SAMPLERS
-from .base_sampler import BaseSampler
-from .sampling_result import SamplingResult
-
-
-@BBOX_SAMPLERS.register_module()
-class PseudoSampler(BaseSampler):
- """A pseudo sampler that does not do sampling actually."""
-
- def __init__(self, **kwargs):
- pass
-
- def _sample_pos(self, **kwargs):
- """Sample positive samples."""
- raise NotImplementedError
-
- def _sample_neg(self, **kwargs):
- """Sample negative samples."""
- raise NotImplementedError
-
- def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
- """Directly returns the positive and negative indices of samples.
-
- Args:
- assign_result (:obj:`AssignResult`): Assigned results
- bboxes (torch.Tensor): Bounding boxes
- gt_bboxes (torch.Tensor): Ground truth boxes
-
- Returns:
- :obj:`SamplingResult`: sampler results
- """
- pos_inds = torch.nonzero(
- assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
- neg_inds = torch.nonzero(
- assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
- gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
- sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
- assign_result, gt_flags)
- return sampling_result
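For reference, the essential behaviour of this sampler is the positive/negative split on `assign_result.gt_inds`; a minimal, self-contained illustration of that indexing with a plain tensor (no mmdet `AssignResult` object, values made up) might look like this:

```python
import torch

# Hypothetical assignment vector: 0 means background, a value > 0 is the
# (1-based) index of the matched ground-truth box.
gt_inds = torch.tensor([0, 2, 0, 1, 1, 0])

# Same indexing as PseudoSampler.sample: positives are matched boxes,
# negatives are background boxes; no subsampling is performed.
pos_inds = torch.nonzero(gt_inds > 0, as_tuple=False).squeeze(-1).unique()
neg_inds = torch.nonzero(gt_inds == 0, as_tuple=False).squeeze(-1).unique()

print(pos_inds.tolist())  # [1, 3, 4]
print(neg_inds.tolist())  # [0, 2, 5]
```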
diff --git a/spaces/HESOAYM/ElviraMulti/modules/shared.py b/spaces/HESOAYM/ElviraMulti/modules/shared.py
deleted file mode 100644
index a9e72580aa7ae48f907e923a09099513570a9ad8..0000000000000000000000000000000000000000
--- a/spaces/HESOAYM/ElviraMulti/modules/shared.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from modules.presets import COMPLETION_URL, BALANCE_API_URL, USAGE_API_URL, API_HOST
-import os
-import queue
-
-class State:
- interrupted = False
- multi_api_key = False
- completion_url = COMPLETION_URL
- balance_api_url = BALANCE_API_URL
- usage_api_url = USAGE_API_URL
-
- def interrupt(self):
- self.interrupted = True
-
- def recover(self):
- self.interrupted = False
-
- def set_api_host(self, api_host):
- self.completion_url = f"https://{api_host}/v1/chat/completions"
- self.balance_api_url = f"https://{api_host}/dashboard/billing/credit_grants"
- self.usage_api_url = f"https://{api_host}/dashboard/billing/usage"
- os.environ["OPENAI_API_BASE"] = f"https://{api_host}/v1"
-
- def reset_api_host(self):
- self.completion_url = COMPLETION_URL
- self.balance_api_url = BALANCE_API_URL
- self.usage_api_url = USAGE_API_URL
- os.environ["OPENAI_API_BASE"] = f"https://{API_HOST}/v1"
- return API_HOST
-
- def reset_all(self):
- self.interrupted = False
- self.completion_url = COMPLETION_URL
-
- def set_api_key_queue(self, api_key_list):
- self.multi_api_key = True
- self.api_key_queue = queue.Queue()
- for api_key in api_key_list:
- self.api_key_queue.put(api_key)
-
- def switching_api_key(self, func):
- if not hasattr(self, "api_key_queue"):
- return func
-
- def wrapped(*args, **kwargs):
- api_key = self.api_key_queue.get()
- args[0].api_key = api_key
- ret = func(*args, **kwargs)
- self.api_key_queue.put(api_key)
- return ret
-
- return wrapped
-
-
-state = State()
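A rough sketch of how the round-robin key rotation above could be exercised, assuming the `state` instance defined at the bottom of this module is importable; `DummyClient` and the key strings are illustrative only:

```python
class DummyClient:
    """Stand-in for an object whose first positional argument carries api_key."""
    def __init__(self):
        self.api_key = None

    def ask(self, question):
        return f"answered {question!r} with key {self.api_key}"

# Enable rotation over two (fake) keys and wrap the method.
state.set_api_key_queue(["sk-key-1", "sk-key-2"])
ask_with_rotation = state.switching_api_key(DummyClient.ask)

client = DummyClient()
# Each call pops a key from the queue, assigns it to args[0].api_key, runs the
# wrapped function, then returns the key to the queue.
print(ask_with_rotation(client, "hello"))
print(ask_with_rotation(client, "again"))
```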
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/roberta/wsc/wsc_task.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/roberta/wsc/wsc_task.py
deleted file mode 100644
index 602ea737ed75a33fddf44dd859e999ecfce2730d..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/roberta/wsc/wsc_task.py
+++ /dev/null
@@ -1,401 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import json
-import os
-import tempfile
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.data import (
- Dictionary,
- IdDataset,
- ListDataset,
- NestedDictionaryDataset,
- NumelDataset,
- NumSamplesDataset,
- PadDataset,
- SortDataset,
- data_utils,
- encoders,
-)
-from fairseq.tasks import LegacyFairseqTask, register_task
-
-from . import wsc_utils
-
-
-@register_task("wsc")
-class WSCTask(LegacyFairseqTask):
- """Task to finetune RoBERTa for Winograd Schemas."""
-
- @staticmethod
- def add_args(parser):
- """Add task-specific arguments to the parser."""
- parser.add_argument(
- "data", metavar="DIR", help="path to data directory; we load .jsonl"
- )
- parser.add_argument(
- "--init-token",
- type=int,
- default=None,
- help="add token at the beginning of each batch item",
- )
-
- def __init__(self, args, vocab):
- super().__init__(args)
- self.vocab = vocab
- self.mask = vocab.add_symbol("<mask>")
-
- self.bpe = encoders.build_bpe(args)
- self.tokenizer = encoders.build_tokenizer(args)
-
- # hack to handle GPT-2 BPE, which includes leading spaces
- if args.bpe == "gpt2":
- self.leading_space = True
- self.trailing_space = False
- else:
- self.leading_space = False
- self.trailing_space = True
-
- @classmethod
- def load_dictionary(cls, filename):
- """Load the dictionary from the filename
-
- Args:
- filename (str): the filename
- """
- dictionary = Dictionary.load(filename)
- dictionary.add_symbol("<mask>")
- return dictionary
-
- @classmethod
- def setup_task(cls, args, **kwargs):
- assert args.criterion == "wsc", "Must set --criterion=wsc"
-
- # load data and label dictionaries
- vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt"))
- print("| dictionary: {} types".format(len(vocab)))
-
- return cls(args, vocab)
-
- def binarize(self, s: str, append_eos: bool = False):
- if self.tokenizer is not None:
- s = self.tokenizer.encode(s)
- if self.bpe is not None:
- s = self.bpe.encode(s)
- tokens = self.vocab.encode_line(
- s,
- append_eos=append_eos,
- add_if_not_exist=False,
- ).long()
- if self.args.init_token is not None:
- tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
- return tokens
-
- def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space):
- toks = self.binarize(
- prefix + leading_space + txt + trailing_space + suffix,
- append_eos=True,
- )
- mask = torch.zeros_like(toks, dtype=torch.bool)
- mask_start = len(self.binarize(prefix))
- mask_size = len(self.binarize(leading_space + txt))
- mask[mask_start : mask_start + mask_size] = 1
- return toks, mask
-
- def load_dataset(
- self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs
- ):
- """Load a given dataset split.
-
- Args:
- split (str): name of the split (e.g., train, valid, test)
- """
- if data_path is None:
- data_path = os.path.join(self.args.data, split + ".jsonl")
- if not os.path.exists(data_path):
- raise FileNotFoundError("Cannot find data: {}".format(data_path))
-
- query_tokens = []
- query_masks = []
- query_lengths = []
- candidate_tokens = []
- candidate_masks = []
- candidate_lengths = []
- labels = []
-
- for sentence, pronoun_span, query, label in wsc_utils.jsonl_iterator(data_path):
- prefix = sentence[: pronoun_span.start].text
- suffix = sentence[pronoun_span.end :].text_with_ws
-
- # spaCy spans include trailing spaces, but we need to know about
- # leading spaces for the GPT-2 BPE
- leading_space = (
- " " if sentence[: pronoun_span.start].text_with_ws.endswith(" ") else ""
- )
- trailing_space = " " if pronoun_span.text_with_ws.endswith(" ") else ""
-
- # get noun phrases, excluding pronouns and anything overlapping with the query
- cand_spans = wsc_utils.filter_noun_chunks(
- wsc_utils.extended_noun_chunks(sentence),
- exclude_pronouns=True,
- exclude_query=query,
- exact_match=False,
- )
-
- if query is not None:
- query_toks, query_mask = self.binarize_with_mask(
- query, prefix, suffix, leading_space, trailing_space
- )
- query_len = len(query_toks)
- else:
- query_toks, query_mask, query_len = None, None, 0
-
- query_tokens.append(query_toks)
- query_masks.append(query_mask)
- query_lengths.append(query_len)
-
- cand_toks, cand_masks = [], []
- for cand_span in cand_spans:
- toks, mask = self.binarize_with_mask(
- cand_span.text,
- prefix,
- suffix,
- leading_space,
- trailing_space,
- )
- cand_toks.append(toks)
- cand_masks.append(mask)
-
- # collate candidates
- cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad())
- cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0)
- assert cand_toks.size() == cand_masks.size()
-
- candidate_tokens.append(cand_toks)
- candidate_masks.append(cand_masks)
- candidate_lengths.append(cand_toks.size(1))
-
- labels.append(label)
-
- query_lengths = np.array(query_lengths)
- query_tokens = ListDataset(query_tokens, query_lengths)
- query_masks = ListDataset(query_masks, query_lengths)
-
- candidate_lengths = np.array(candidate_lengths)
- candidate_tokens = ListDataset(candidate_tokens, candidate_lengths)
- candidate_masks = ListDataset(candidate_masks, candidate_lengths)
-
- labels = ListDataset(labels, [1] * len(labels))
-
- dataset = {
- "id": IdDataset(),
- "query_tokens": query_tokens,
- "query_masks": query_masks,
- "candidate_tokens": candidate_tokens,
- "candidate_masks": candidate_masks,
- "labels": labels,
- "nsentences": NumSamplesDataset(),
- "ntokens": NumelDataset(query_tokens, reduce=True),
- }
-
- nested_dataset = NestedDictionaryDataset(
- dataset,
- sizes=[query_lengths],
- )
-
- with data_utils.numpy_seed(self.args.seed):
- shuffle = np.random.permutation(len(query_tokens))
- dataset = SortDataset(
- nested_dataset,
- # shuffle
- sort_order=[shuffle],
- )
-
- if return_only:
- return dataset
-
- self.datasets[split] = dataset
- return self.datasets[split]
-
- def build_dataset_for_inference(self, sample_json):
- with tempfile.NamedTemporaryFile(buffering=0) as h:
- h.write((json.dumps(sample_json) + "\n").encode("utf-8"))
- dataset = self.load_dataset(
- "disambiguate_pronoun",
- data_path=h.name,
- return_only=True,
- )
- return dataset
-
- def disambiguate_pronoun(self, model, sentence, use_cuda=False):
- sample_json = wsc_utils.convert_sentence_to_json(sentence)
- dataset = self.build_dataset_for_inference(sample_json)
- sample = dataset.collater([dataset[0]])
- if use_cuda:
- sample = utils.move_to_cuda(sample)
-
- def get_masked_input(tokens, mask):
- masked_tokens = tokens.clone()
- masked_tokens[mask.bool()] = self.mask
- return masked_tokens
-
- def get_lprobs(tokens, mask):
- logits, _ = model(src_tokens=get_masked_input(tokens, mask))
- lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
- scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
- mask = mask.type_as(scores)
- scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
- return scores
-
- cand_lprobs = get_lprobs(
- sample["candidate_tokens"][0],
- sample["candidate_masks"][0],
- )
- if sample["query_tokens"][0] is not None:
- query_lprobs = get_lprobs(
- sample["query_tokens"][0].unsqueeze(0),
- sample["query_masks"][0].unsqueeze(0),
- )
- return (query_lprobs >= cand_lprobs).all().item() == 1
- else:
- best_idx = cand_lprobs.argmax().item()
- full_cand = sample["candidate_tokens"][0][best_idx]
- mask = sample["candidate_masks"][0][best_idx]
- toks = full_cand[mask.bool()]
- return self.bpe.decode(self.source_dictionary.string(toks)).strip()
-
- @property
- def source_dictionary(self):
- return self.vocab
-
- @property
- def target_dictionary(self):
- return self.vocab
-
-
-@register_task("winogrande")
-class WinograndeTask(WSCTask):
- """
- Task for WinoGrande dataset. Efficient implementation for Winograd schema
- tasks with exactly two candidates, one of which is correct.
- """
-
- @classmethod
- def setup_task(cls, args, **kwargs):
- assert args.criterion == "winogrande", "Must set --criterion=winogrande"
-
- # load data and label dictionaries
- vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt"))
- print("| dictionary: {} types".format(len(vocab)))
-
- return cls(args, vocab)
-
- def load_dataset(
- self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs
- ):
- """Load a given dataset split.
-
- Args:
- split (str): name of the split (e.g., train, valid, test)
- """
- if data_path is None:
- data_path = os.path.join(self.args.data, split + ".jsonl")
- if not os.path.exists(data_path):
- raise FileNotFoundError("Cannot find data: {}".format(data_path))
-
- query_tokens = []
- query_masks = []
- query_lengths = []
- candidate_tokens = []
- candidate_masks = []
- candidate_lengths = []
-
- itr = wsc_utils.winogrande_jsonl_iterator(data_path, eval=(split == "test"))
-
- for sample in itr:
- sentence, pronoun_span, query, cand_text = sample
- prefix = sentence[: pronoun_span[0]].rstrip()
- suffix = sentence[pronoun_span[1] :]
-
- leading_space = " " if sentence[: pronoun_span[0]].endswith(" ") else ""
- trailing_space = ""
-
- if query is not None:
- query_toks, query_mask = self.binarize_with_mask(
- query,
- prefix,
- suffix,
- leading_space,
- trailing_space,
- )
- query_len = len(query_toks)
- else:
- query_toks, query_mask, query_len = None, None, 0
-
- query_tokens.append(query_toks)
- query_masks.append(query_mask)
- query_lengths.append(query_len)
-
- cand_toks, cand_mask = self.binarize_with_mask(
- cand_text,
- prefix,
- suffix,
- leading_space,
- trailing_space,
- )
-
- candidate_tokens.append(cand_toks)
- candidate_masks.append(cand_mask)
- candidate_lengths.append(cand_toks.size(0))
-
- query_lengths = np.array(query_lengths)
-
- def get_pad_dataset_fn(tokens, length, pad_idx):
- return PadDataset(
- ListDataset(tokens, length),
- pad_idx=pad_idx,
- left_pad=False,
- )
-
- query_tokens = get_pad_dataset_fn(query_tokens, query_lengths, self.vocab.pad())
- query_masks = get_pad_dataset_fn(query_masks, query_lengths, 0)
-
- candidate_lengths = np.array(candidate_lengths)
- candidate_tokens = get_pad_dataset_fn(
- candidate_tokens, candidate_lengths, self.vocab.pad()
- )
- candidate_masks = get_pad_dataset_fn(candidate_masks, candidate_lengths, 0)
-
- dataset = {
- "id": IdDataset(),
- "query_tokens": query_tokens,
- "query_masks": query_masks,
- "candidate_tokens": candidate_tokens,
- "candidate_masks": candidate_masks,
- "nsentences": NumSamplesDataset(),
- "ntokens": NumelDataset(query_tokens, reduce=True),
- }
-
- nested_dataset = NestedDictionaryDataset(
- dataset,
- sizes=[query_lengths],
- )
-
- with data_utils.numpy_seed(self.args.seed):
- shuffle = np.random.permutation(len(query_tokens))
- dataset = SortDataset(
- nested_dataset,
- # shuffle
- sort_order=[shuffle],
- )
-
- if return_only:
- return dataset
-
- self.datasets[split] = dataset
- return self.datasets[split]
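For context, `disambiguate_pronoun` is normally reached through the RoBERTa hub interface; a sketch along the lines of the fairseq WSC example, with placeholder paths and checkpoint names, might be:

```python
from fairseq.models.roberta import RobertaModel

# Load a checkpoint fine-tuned with the 'wsc' task; all paths are placeholders.
roberta = RobertaModel.from_pretrained('checkpoints', 'checkpoint_best.pt', 'WSC/')
roberta.eval()

# The candidate span is marked with underscores and the pronoun with brackets;
# the task returns True if the candidate is judged to corefer with the pronoun.
print(roberta.disambiguate_pronoun(
    'The _trophy_ would not fit in the brown suitcase because [it] was too big.'
))
```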
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/cpc_feature_reader.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/cpc_feature_reader.py
deleted file mode 100644
index c613f52d3c3de43a048849a231a9a34e2a883486..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/cpc_feature_reader.py
+++ /dev/null
@@ -1,192 +0,0 @@
-import soundfile as sf
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-class CpcFeatureReader:
- """
- Wrapper class to run inference on CPC model.
- Helps extract features for a given audio file.
- """
-
- def __init__(
- self,
- checkpoint_path,
- layer,
- use_encoder_layer=False,
- norm_features=False,
- sample_rate=16000,
- max_chunk=64000,
- ):
- self.model = load_cpc_model(checkpoint_path, layer).eval().cuda()
- self.sample_rate = sample_rate
- self.max_chunk = max_chunk
- self.norm_features = norm_features
- self.use_encoder_layer = use_encoder_layer
-
- def read_audio(self, path, ref_len=None):
- wav, sr = sf.read(path)
- if wav.ndim == 2:
- wav = wav.mean(-1)
- assert wav.ndim == 1, wav.ndim
- assert sr == self.sample_rate, sr
- if ref_len is not None and abs(ref_len - len(wav)) > 160:
- print(f"ref {ref_len} != read {len(wav)} ({path})")
- return wav
-
- def get_feats(self, file_path, ref_len=None):
- x = self.read_audio(file_path, ref_len)
- # Inspired from CPC_audio feature_loader.py
- with torch.no_grad():
- x = torch.from_numpy(x).float().cuda()
- x = x.view(1, 1, -1)
- size = x.size(2)
- feat = []
- start = 0
- while start < size:
- if start + self.max_chunk > size:
- break
- x_chunk = x[..., start : start + self.max_chunk]
- feat_chunk = self.model.extract_features(
- source=x_chunk,
- get_encoded=self.use_encoder_layer,
- norm_output=self.norm_features,
- )
- feat.append(feat_chunk)
- start += self.max_chunk
-
- if start < size:
- x_chunk = x[:, -self.max_chunk :]
- feat_chunk = self.model.extract_features(
- source=x_chunk,
- get_encoded=self.use_encoder_layer,
- norm_output=self.norm_features,
- )
- df = x_chunk.size(2) // feat_chunk.size(1)
- delta = (size - start) // df
- feat.append(feat_chunk[:, -delta:])
- return torch.cat(feat, 1).squeeze(0)
-
-
-def load_cpc_model(checkpoint_path, layer=None):
- state_dict = torch.load(checkpoint_path)
- weights = state_dict["weights"]
- config = state_dict["config"]
- if layer is not None:
- config["nLevelsGRU"] = layer
-
- encoder = CPCEncoder(config["hiddenEncoder"])
- ar_net = CPCAR(
- config["hiddenEncoder"], config["hiddenGar"], False, config["nLevelsGRU"]
- )
-
- model = CPCModel(encoder, ar_net)
- model.load_state_dict(weights, strict=False)
- model.config = config
-
- return model
-
-
-class ChannelNorm(nn.Module):
- def __init__(self, num_features, epsilon=1e-05, affine=True):
- super(ChannelNorm, self).__init__()
- if affine:
- self.weight = nn.parameter.Parameter(torch.Tensor(1, num_features, 1))
- self.bias = nn.parameter.Parameter(torch.Tensor(1, num_features, 1))
- else:
- self.weight = None
- self.bias = None
- self.epsilon = epsilon
- self.p = 0
- self.affine = affine
- self.reset_parameters()
-
- def reset_parameters(self):
- if self.affine:
- torch.nn.init.ones_(self.weight)
- torch.nn.init.zeros_(self.bias)
-
- def forward(self, x):
- cum_mean = x.mean(dim=1, keepdim=True)
- cum_var = x.var(dim=1, keepdim=True)
- x = (x - cum_mean) * torch.rsqrt(cum_var + self.epsilon)
- if self.weight is not None:
- x = x * self.weight + self.bias
- return x
-
-
-class CPCEncoder(nn.Module):
- def __init__(self, hidden_dim=512):
- super(CPCEncoder, self).__init__()
- self.conv0 = nn.Conv1d(1, hidden_dim, 10, stride=5, padding=3)
- self.batchNorm0 = ChannelNorm(hidden_dim)
- self.conv1 = nn.Conv1d(hidden_dim, hidden_dim, 8, stride=4, padding=2)
- self.batchNorm1 = ChannelNorm(hidden_dim)
- self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1)
- self.batchNorm2 = ChannelNorm(hidden_dim)
- self.conv3 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1)
- self.batchNorm3 = ChannelNorm(hidden_dim)
- self.conv4 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1)
- self.batchNorm4 = ChannelNorm(hidden_dim)
- self.DOWNSAMPLING = 160
-
- def get_output_dim(self):
- return self.conv4.out_channels
-
- def forward(self, x):
- x = F.relu(self.batchNorm0(self.conv0(x)))
- x = F.relu(self.batchNorm1(self.conv1(x)))
- x = F.relu(self.batchNorm2(self.conv2(x)))
- x = F.relu(self.batchNorm3(self.conv3(x)))
- x = F.relu(self.batchNorm4(self.conv4(x)))
- return x
-
-
-class CPCAR(nn.Module):
- def __init__(self, dim_encoded, dim_output, keep_hidden, num_layers):
- super(CPCAR, self).__init__()
- self.baseNet = nn.LSTM(
- dim_encoded, dim_output, num_layers=num_layers, batch_first=True
- )
- self.hidden = None
- self.keep_hidden = keep_hidden
-
- def get_output_dim(self):
- return self.baseNet.hidden_size
-
- def forward(self, x):
- try:
- self.baseNet.flatten_parameters()
- except RuntimeError:
- pass
- x, h = self.baseNet(x, self.hidden)
- if self.keep_hidden:
- if isinstance(h, tuple):
- self.hidden = tuple(x.detach() for x in h)
- else:
- self.hidden = h.detach()
- return x
-
-
-class CPCModel(nn.Module):
- def __init__(self, encoder, ar_net):
- super(CPCModel, self).__init__()
- self.gEncoder = encoder
- self.gAR = ar_net
- self.config = None
-
- def forward(self, x, label):
- encoded = self.gEncoder(x).permute(0, 2, 1)
- cpc_feature = self.gAR(encoded)
- return cpc_feature, encoded, label
-
- def extract_features(self, source, get_encoded=False, norm_output=False):
- cpc_feature, encoded, _ = self.forward(source, None)
- if get_encoded:
- cpc_feature = encoded
- if norm_output:
- mean = cpc_feature.mean(dim=1, keepdim=True)
- var = cpc_feature.var(dim=1, keepdim=True)
- cpc_feature = (cpc_feature - mean) / torch.sqrt(var + 1e-08)
- return cpc_feature
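A minimal usage sketch for the reader above (the constructor moves the model to CUDA, so a GPU is required; the checkpoint and audio paths are placeholders):

```python
# Extract frame-level CPC features from a 16 kHz mono wav file.
reader = CpcFeatureReader(
    checkpoint_path="cpc_checkpoint.pt",  # placeholder checkpoint path
    layer=2,                              # number of recurrent levels to load
)
feats = reader.get_feats("utterance.wav")
print(feats.shape)  # (num_frames, feature_dim)
```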
diff --git a/spaces/Harveenchadha/en_to_indic_translation/indic_nlp_library/indicnlp/tokenize/__init__.py b/spaces/Harveenchadha/en_to_indic_translation/indic_nlp_library/indicnlp/tokenize/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/subword_nmt.py b/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/subword_nmt.py
deleted file mode 100644
index 29104f4d8029524a80d6fa649b69a8acec0b8abc..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/subword_nmt.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import io
-import sys
-import codecs
-import argparse
-
-from .learn_bpe import learn_bpe
-from .apply_bpe import BPE, read_vocabulary
-from .get_vocab import get_vocab
-from .learn_joint_bpe_and_vocab import learn_joint_bpe_and_vocab
-
-from .learn_bpe import create_parser as create_learn_bpe_parser
-from .apply_bpe import create_parser as create_apply_bpe_parser
-from .get_vocab import create_parser as create_get_vocab_parser
-from .learn_joint_bpe_and_vocab import create_parser as create_learn_joint_bpe_and_vocab_parser
-
-# hack for python2/3 compatibility
-argparse.open = io.open
-
-def main():
- parser = argparse.ArgumentParser(
- formatter_class=argparse.RawTextHelpFormatter,
- description="subword-nmt: unsupervised word segmentation for neural machine translation and text generation ")
- subparsers = parser.add_subparsers(dest='command',
- help="""command to run. Run one of the commands with '-h' for more info.
-
-learn-bpe: learn BPE merge operations on input text.
-apply-bpe: apply given BPE operations to input text.
-get-vocab: extract vocabulary and word frequencies from input text.
-learn-joint-bpe-and-vocab: executes recommended workflow for joint BPE.""")
-
- learn_bpe_parser = create_learn_bpe_parser(subparsers)
- apply_bpe_parser = create_apply_bpe_parser(subparsers)
- get_vocab_parser = create_get_vocab_parser(subparsers)
- learn_joint_bpe_and_vocab_parser = create_learn_joint_bpe_and_vocab_parser(subparsers)
-
- args = parser.parse_args()
-
- if args.command == 'learn-bpe':
- # read/write files as UTF-8
- if args.input.name != '<stdin>':
- args.input = codecs.open(args.input.name, encoding='utf-8')
- if args.output.name != '<stdout>':
- args.output = codecs.open(args.output.name, 'w', encoding='utf-8')
-
- learn_bpe(args.input, args.output, args.symbols, args.min_frequency, args.verbose,
- is_dict=args.dict_input, total_symbols=args.total_symbols)
- elif args.command == 'apply-bpe':
- # read/write files as UTF-8
- args.codes = codecs.open(args.codes.name, encoding='utf-8')
- if args.input.name != '<stdin>':
- args.input = codecs.open(args.input.name, encoding='utf-8')
- if args.output.name != '<stdout>':
- args.output = codecs.open(args.output.name, 'w', encoding='utf-8')
- if args.vocabulary:
- args.vocabulary = codecs.open(args.vocabulary.name, encoding='utf-8')
-
- if args.vocabulary:
- vocabulary = read_vocabulary(args.vocabulary, args.vocabulary_threshold)
- else:
- vocabulary = None
-
- if sys.version_info < (3, 0):
- args.separator = args.separator.decode('UTF-8')
- if args.glossaries:
- args.glossaries = [g.decode('UTF-8') for g in args.glossaries]
-
- bpe = BPE(args.codes, args.merges, args.separator, vocabulary, args.glossaries)
-
- for line in args.input:
- args.output.write(bpe.process_line(line, args.dropout))
-
- elif args.command == 'get-vocab':
- if args.input.name != '<stdin>':
- args.input = codecs.open(args.input.name, encoding='utf-8')
- if args.output.name != '<stdout>':
- args.output = codecs.open(args.output.name, 'w', encoding='utf-8')
- get_vocab(args.input, args.output)
- elif args.command == 'learn-joint-bpe-and-vocab':
- learn_joint_bpe_and_vocab(args)
- if sys.version_info < (3, 0):
- args.separator = args.separator.decode('UTF-8')
- else:
- raise Exception('Invalid command provided')
-
-
-# python 2/3 compatibility
-if sys.version_info < (3, 0):
- sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
- sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
- sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
-else:
- sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer)
- sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer)
- sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer)
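The same components can also be driven programmatically rather than through this CLI wrapper; a sketch under the assumption of the signatures imported above (file paths are placeholders):

```python
import codecs
from subword_nmt.learn_bpe import learn_bpe
from subword_nmt.apply_bpe import BPE

# Learn 10k BPE merge operations from a tokenized corpus.
with codecs.open("corpus.tok", encoding="utf-8") as infile, \
     codecs.open("codes.bpe", "w", encoding="utf-8") as outfile:
    learn_bpe(infile, outfile, 10000)

# Apply the learned codes to a line of text.
with codecs.open("codes.bpe", encoding="utf-8") as codes:
    bpe = BPE(codes)
print(bpe.process_line("the quick brown fox\n"))
```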
diff --git a/spaces/Harveenchadha/oiTrans/indic_nlp_library/indicnlp/script/indic_scripts.py b/spaces/Harveenchadha/oiTrans/indic_nlp_library/indicnlp/script/indic_scripts.py
deleted file mode 100644
index 66c797cc583b6dadc1903194919a8faea509be0d..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/oiTrans/indic_nlp_library/indicnlp/script/indic_scripts.py
+++ /dev/null
@@ -1,301 +0,0 @@
-#
-# Copyright (c) 2013-present, Anoop Kunchukuttan
-# All rights reserved.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-#
-
-import pandas as pd
-import numpy as np
-import os
-
-from indicnlp import common
-from indicnlp.common import IndicNlpException
-from indicnlp import langinfo as li
-
-###
-# Phonetic Information about script characters
-###
-
-""" Phonetic data about all languages except Tamil """
-ALL_PHONETIC_DATA=None
-
-""" Phonetic data for Tamil """
-TAMIL_PHONETIC_DATA=None
-
-""" Phonetic vector for all languages except Tamil """
-ALL_PHONETIC_VECTORS=None
-
-""" Phonetic vector for Tamil """
-TAMIL_PHONETIC_VECTORS=None
-
-""" Length of phonetic vector """
-PHONETIC_VECTOR_LENGTH=38
-
-""" Start offset for the phonetic feature vector in the phonetic data vector """
-PHONETIC_VECTOR_START_OFFSET=6
-
-## PHONETIC PROPERTIES in order in which they occur in the vector
-## This list must be in sync with the keys in the PV_PROP_RANGES dictionary
-PV_PROP=['basic_type',
- 'vowel_length',
- 'vowel_strength',
- 'vowel_status',
- 'consonant_type',
- 'articulation_place',
- 'aspiration',
- 'voicing',
- 'nasalization',
- 'vowel_horizontal',
- 'vowel_vertical',
- 'vowel_roundness',
- ]
-
-###
-# Bit vector ranges for various properties
-###
-
-PV_PROP_RANGES={
- 'basic_type': [0,6],
- 'vowel_length': [6,8],
- 'vowel_strength': [8,11],
- 'vowel_status': [11,13],
- 'consonant_type': [13,18],
- 'articulation_place': [18,23],
- 'aspiration': [23,25],
- 'voicing': [25,27],
- 'nasalization': [27,29],
- 'vowel_horizontal': [29,32],
- 'vowel_vertical': [32,36],
- 'vowel_roundness': [36,38],
- }
-
-
-####
-# Indexes into the Phonetic Vector
-####
-PVIDX_BT_VOWEL=0
-PVIDX_BT_CONSONANT=1
-PVIDX_BT_NUKTA=2
-PVIDX_BT_HALANT=3
-PVIDX_BT_ANUSVAAR=4
-PVIDX_BT_MISC=5
-PVIDX_BT_S=PVIDX_BT_VOWEL
-PVIDX_BT_E=PVIDX_BT_MISC+1
-
-PVIDX_VSTAT_DEP=12
-
-#####
-# Unicode information about characters
-#####
-
-SCRIPT_OFFSET_START=0
-SCRIPT_OFFSET_RANGE=0x80
-
-def init():
- """
- To be called by library loader, do not call it in your program
- """
-
- global ALL_PHONETIC_DATA, ALL_PHONETIC_VECTORS, TAMIL_PHONETIC_DATA, TAMIL_PHONETIC_VECTORS, PHONETIC_VECTOR_LENGTH, PHONETIC_VECTOR_START_OFFSET
-
- ALL_PHONETIC_DATA=pd.read_csv(os.path.join(common.get_resources_path(),'script','all_script_phonetic_data.csv'),encoding='utf-8')
- TAMIL_PHONETIC_DATA=pd.read_csv(os.path.join(common.get_resources_path(),'script','tamil_script_phonetic_data.csv'),encoding='utf-8')
-
- ALL_PHONETIC_VECTORS= ALL_PHONETIC_DATA.iloc[:,PHONETIC_VECTOR_START_OFFSET:].values
- TAMIL_PHONETIC_VECTORS=TAMIL_PHONETIC_DATA.iloc[:,PHONETIC_VECTOR_START_OFFSET:].values
-
- PHONETIC_VECTOR_LENGTH=ALL_PHONETIC_VECTORS.shape[1]
-
-def is_supported_language(lang):
- return lang in list(li.SCRIPT_RANGES.keys())
-
-def get_offset(c,lang):
- if not is_supported_language(lang):
- raise IndicNlpException('Language {} not supported'.format(lang))
- return ord(c)-li.SCRIPT_RANGES[lang][0]
-
-def offset_to_char(off,lang):
- """
- Applicable to Brahmi derived Indic scripts
- """
- if not is_supported_language(lang):
- raise IndicNlpException('Language {} not supported'.format(lang))
- return chr(off+li.SCRIPT_RANGES[lang][0])
-
-def is_indiclang_char(c,lang):
- """
- Applicable to Brahmi derived Indic scripts
- Note that DANDA and DOUBLE_DANDA have the same Unicode codepoint for all Indic scripts
- """
- if not is_supported_language(lang):
- raise IndicNlpException('Language {} not supported'.format(lang))
- o=get_offset(c,lang)
- return (o>=SCRIPT_OFFSET_START and o<SCRIPT_OFFSET_RANGE) or ord(c)==li.DANDA or ord(c)==li.DOUBLE_DANDA
-
-def in_coordinated_range_offset(c_offset):
- return (c_offset>=li.COORDINATED_RANGE_START_INCLUSIVE and c_offset<=li.COORDINATED_RANGE_END_INCLUSIVE)
-
-def in_coordinated_range(c,lang):
- if not is_supported_language(lang):
- raise IndicNlpException('Language {} not supported'.format(lang))
- return in_coordinated_range_offset(get_offset(c,lang))
-
-def get_phonetic_info(lang):
- if not is_supported_language(lang):
- raise IndicNlpException('Language {} not supported'.format(lang))
- phonetic_data= ALL_PHONETIC_DATA if lang!=li.LC_TA else TAMIL_PHONETIC_DATA
- phonetic_vectors= ALL_PHONETIC_VECTORS if lang!=li.LC_TA else TAMIL_PHONETIC_VECTORS
-
- return (phonetic_data, phonetic_vectors)
-
-def invalid_vector():
- ## TODO: check if np datatype is correct?
- return np.array([0]*PHONETIC_VECTOR_LENGTH)
-
-def get_phonetic_feature_vector(c,lang):
-
- offset=get_offset(c,lang)
-
- if not in_coordinated_range_offset(offset):
- return invalid_vector()
-
- phonetic_data, phonetic_vectors= get_phonetic_info(lang)
-
- if phonetic_data.iloc[offset]['Valid Vector Representation']==0:
- return invalid_vector()
-
- return phonetic_vectors[offset]
-
-def get_phonetic_feature_vector_offset(offset,lang):
-
- if not in_coordinated_range_offset(offset):
- return invalid_vector()
-
- phonetic_data, phonetic_vectors= get_phonetic_info(lang)
-
- if phonetic_data.iloc[offset]['Valid Vector Representation']==0:
- return invalid_vector()
-
- return phonetic_vectors[offset]
-
-### Unary operations on vectors
-def is_valid(v):
- return np.sum(v)>0
-
-def is_vowel(v):
- return v[PVIDX_BT_VOWEL]==1
-
-def is_consonant(v):
- return v[PVIDX_BT_CONSONANT]==1
-
-def is_halant(v):
- return v[PVIDX_BT_HALANT]==1
-
-def is_nukta(v):
- return v[PVIDX_BT_NUKTA]==1
-
-def is_anusvaar(v):
- return v[PVIDX_BT_ANUSVAAR]==1
-
-def is_misc(v):
- return v[PVIDX_BT_MISC]==1
-
-def is_dependent_vowel(v):
- return is_vowel(v) and v[PVIDX_VSTAT_DEP]==1
-
-def is_plosive(v):
- return is_consonant(v) and get_property_vector(v,'consonant_type')[0]==1
-
-### Binary operations on phonetic vectors
-
-def or_vectors(v1,v2):
- return np.array([ 1 if (b1+b2)>=1 else 0 for b1,b2 in zip(v1,v2) ])
-
-def xor_vectors(v1,v2):
- return np.array([ 1 if b1!=b2 else 0 for b1,b2 in zip(v1,v2) ])
-
-### Getting properties from phonetic vectors
-
-def get_property_vector(v,prop_name):
- return v[PV_PROP_RANGES[prop_name][0]:PV_PROP_RANGES[prop_name][1]]
-
-def get_property_value(v,prop_name):
- factor_bits=get_property_vector(v,prop_name).tolist()
-
- v=0
- c=1
- for b in factor_bits[::-1]:
- v+=(c*b)
- c=c*2.0
-
- return int(v)
-
-def lcsr_indic(srcw,tgtw,slang,tlang):
- """
- compute the Longest Common Subsequence Ratio (LCSR) between two strings at the character level.
- This works for Indic scripts by mapping both languages to a common script
-
- srcw: source language string
- tgtw: target language string
- slang: source language
- tlang: target language
- """
- score_mat=np.zeros((len(srcw)+1,len(tgtw)+1))
-
- for si,sc in enumerate(srcw,1):
- for ti,tc in enumerate(tgtw,1):
- so=get_offset(sc,slang)
- to=get_offset(tc,tlang)
-
- if in_coordinated_range_offset(so) and in_coordinated_range_offset(to) and so==to:
- score_mat[si,ti]=score_mat[si-1,ti-1]+1.0
- elif not (in_coordinated_range_offset(so) or in_coordinated_range_offset(to)) and sc==tc:
- score_mat[si,ti]=score_mat[si-1,ti-1]+1.0
- else:
- score_mat[si,ti]= max(
- score_mat[si,ti-1],
- score_mat[si-1,ti])
-
- return (score_mat[-1,-1]/float(max(len(srcw),len(tgtw))),float(len(srcw)),float(len(tgtw)))
-
-def lcsr_any(srcw,tgtw):
- """
- LCSR computation if both languages have the same script
- """
- score_mat=np.zeros((len(srcw)+1,len(tgtw)+1))
-
- for si,sc in enumerate(srcw,1):
- for ti,tc in enumerate(tgtw,1):
-
- if sc==tc:
- score_mat[si,ti]=score_mat[si-1,ti-1]+1.0
- else:
- score_mat[si,ti]= max(
- score_mat[si,ti-1],
- score_mat[si-1,ti])
-
- return (score_mat[-1,-1]/float(max(len(srcw),len(tgtw))),float(len(srcw)),float(len(tgtw)))
-
-def lcsr(srcw,tgtw,slang,tlang):
- """
- compute the Longest Common Subsequence Ratio (LCSR) between two strings at the character level.
-
- srcw: source language string
- tgtw: target language string
- slang: source language
- tlang: target language
- """
-
- if slang==tlang or not is_supported_language(slang) or not is_supported_language(tlang):
- return lcsr_any(srcw,tgtw)
- else:
- return lcsr_indic(srcw,tgtw,slang,tlang)
-
-
-
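A short usage sketch for the utilities above, assuming the Indic NLP resources have been downloaded and the resources path below is adjusted accordingly:

```python
from indicnlp import common, loader

common.set_resources_path("indic_nlp_resources")  # placeholder resources path
loader.load()  # runs init() for this module, loading the phonetic tables

v = get_phonetic_feature_vector('क', 'hi')  # Devanagari letter KA, a velar stop
print(is_consonant(v), is_plosive(v))       # both expected to be True

# Character-level cognate similarity across scripts (Hindi vs. Bengali).
print(lcsr('भारत', 'ভারত', 'hi', 'bn'))
```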
diff --git a/spaces/Hasani/Binary-Video-Classification-In-The-Wild/app.py b/spaces/Hasani/Binary-Video-Classification-In-The-Wild/app.py
deleted file mode 100644
index b77d1aeb34020c4f299f5122798c934f2665db21..0000000000000000000000000000000000000000
--- a/spaces/Hasani/Binary-Video-Classification-In-The-Wild/app.py
+++ /dev/null
@@ -1,145 +0,0 @@
-import gradio as gr
-import torch
-import numpy as np
-from transformers import AutoProcessor, AutoModel
-from PIL import Image
-import cv2
-from concurrent.futures import ThreadPoolExecutor
-import os
-
-
-MODEL_NAME = "microsoft/xclip-base-patch16-zero-shot"
-CLIP_LEN = 32
-
-# Check if GPU is available and set the device
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-print (device)
-
-# Load model and processor once and move them to the device
-processor = AutoProcessor.from_pretrained(MODEL_NAME)
-model = AutoModel.from_pretrained(MODEL_NAME).to(device)
-
-def get_video_length(file_path):
- cap = cv2.VideoCapture(file_path)
- length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
- cap.release()
- return length
-
-def read_video_opencv(file_path, indices):
- frames = []
- with ThreadPoolExecutor() as executor:
- futures = [executor.submit(get_frame, file_path, i) for i in indices]
- for future in futures:
- frame = future.result()
- if frame is not None:
- frames.append(frame)
- return frames
-
-def get_frame(file_path, index):
- cap = cv2.VideoCapture(file_path)
- cap.set(cv2.CAP_PROP_POS_FRAMES, index)
- ret, frame = cap.read()
- cap.release()
- if ret:
- return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
- return None
-
-def sample_uniform_frame_indices(clip_len, seg_len):
- if seg_len < clip_len:
- repeat_factor = np.ceil(clip_len / seg_len).astype(int)
- indices = np.arange(seg_len).tolist() * repeat_factor
- indices = indices[:clip_len]
- else:
- spacing = seg_len // clip_len
- indices = [i * spacing for i in range(clip_len)]
- return np.array(indices).astype(np.int64)
-
-def concatenate_frames(frames, clip_len):
- layout = { 32: (4, 8) }
- rows, cols = layout[clip_len]
- combined_image = Image.new('RGB', (frames[0].shape[1]*cols, frames[0].shape[0]*rows))
- frame_iter = iter(frames)
- y_offset = 0
- for i in range(rows):
- x_offset = 0
- for j in range(cols):
- img = Image.fromarray(next(frame_iter))
- combined_image.paste(img, (x_offset, y_offset))
- x_offset += frames[0].shape[1]
- y_offset += frames[0].shape[0]
- return combined_image
-
-def model_interface(uploaded_video, activity):
- video_length = get_video_length(uploaded_video)
- indices = sample_uniform_frame_indices(CLIP_LEN, seg_len=video_length)
- video = read_video_opencv(uploaded_video, indices)
- concatenated_image = concatenate_frames(video, CLIP_LEN)
-
- activities_list = [activity, "other"]
- inputs = processor(
- text=activities_list,
- videos=list(video),
- return_tensors="pt",
- padding=True,
- )
-
- # Move the tensors to the same device as the model
- for key, value in inputs.items():
- if isinstance(value, torch.Tensor):
- inputs[key] = value.to(device)
-
- with torch.no_grad():
- outputs = model(**inputs)
-
- logits_per_video = outputs.logits_per_video
- probs = logits_per_video.softmax(dim=1)
-
- results_probs = []
- results_logits = []
- max_prob_index = torch.argmax(probs[0]).item()
- for i in range(len(activities_list)):
- current_activity = activities_list[i]
- prob = float(probs[0][i].cpu()) # Move tensor data to CPU for further processing
- logit = float(logits_per_video[0][i].cpu()) # Move tensor data to CPU for further processing
- results_probs.append((current_activity, f"Probability: {prob * 100:.2f}%"))
- results_logits.append((current_activity, f"Raw Score: {logit:.2f}"))
-
- likely_label = activities_list[max_prob_index]
- likely_probability = float(probs[0][max_prob_index].cpu()) * 100 # Move tensor data to CPU
-
- activity_performed = False
- if likely_label != 'other':
- activity_performed = True
-
- return activity_performed, concatenated_image, results_probs, results_logits, [likely_label, likely_probability]
-
-
-# Load video paths from the folder
-#video_folder = "Action Detection Samples"
-#video_files = [os.path.join(video_folder, file) for file in os.listdir(video_folder) if file.endswith('.mp4')] # considering only mp4 files
-
-# Create examples: assuming every video is about 'dancing'
-#examples = [[video, "taking a shot"] for video in video_files]
-
-iface = gr.Interface(
- fn=model_interface,
- inputs=[
- gr.components.Video(label="Upload a video file"),
- gr.components.Text(default="taking a shot", label="Desired Activity to Recognize"),
- ],
- outputs=[
- gr.components.Text(type="text", label="True/False"),
- gr.components.Image(type="pil", label="Sampled Frames"),
- gr.components.Text(type="text", label="Probabilities"),
- gr.components.Text(type="text", label="Raw Scores"),
- gr.components.Text(type="text", label="Top Prediction"),
-
- ],
- title="Action Detection Video",
- description="[Author: Ibrahim Hasani] This Method uses X-CLIP [Version: ZERO SHOT / SAMPLED FRAMES = 32] to determine if an action is being performed in a video or not. (Binaray Classifier). It contrasts an Action against multiple negative labels that are supposedly far enough in the latent semantic space vs the target label. Do not use negative labels in the desired activity, rather the action to be performed.",
- live=False,
- theme=gr.themes.Monochrome(),
- #examples=examples # Add examples to the interface
-)
-
-iface.launch()
\ No newline at end of file
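As a quick check of the frame-sampling helper above: with `CLIP_LEN = 32` and a 900-frame video, frames are taken every `900 // 32 = 28` frames starting from 0, while shorter videos are tiled until 32 indices are available. A small sketch, assuming `sample_uniform_frame_indices` from the app is in scope:

```python
# Long video: indices are evenly spaced.
idx = sample_uniform_frame_indices(32, seg_len=900)
print(idx[:5], len(idx))   # [  0  28  56  84 112] 32

# Short video: the available frames are repeated until 32 indices are reached.
idx = sample_uniform_frame_indices(32, seg_len=10)
print(idx[:12], len(idx))  # [0 1 2 3 4 5 6 7 8 9 0 1] 32
```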
diff --git a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/cache_dir/HuggingFaceM4/OBELICS_opt_out_docs_removed_2023_07_12_train_texts/zipf/zipf_fig.html b/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/cache_dir/HuggingFaceM4/OBELICS_opt_out_docs_removed_2023_07_12_train_texts/zipf/zipf_fig.html
deleted file mode 100644
index 9fad90d1274bd58111abb76e42e8fbee14c637f4..0000000000000000000000000000000000000000
--- a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/cache_dir/HuggingFaceM4/OBELICS_opt_out_docs_removed_2023_07_12_train_texts/zipf/zipf_fig.html
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/ICML2022/OFA/fairseq/examples/m2m_100/tokenizers/seg_ko.sh b/spaces/ICML2022/OFA/fairseq/examples/m2m_100/tokenizers/seg_ko.sh
deleted file mode 100644
index c523d92634d9b61b97bbcdbfd17dfc33465bfc09..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/m2m_100/tokenizers/seg_ko.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-SCRIPT=`realpath $0`
-MECAB=`dirname $SCRIPT`/thirdparty/mecab-0.996-ko-0.9.2
-
-export PATH=$PATH:"$MECAB/bin":"$MECAB/lib"
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:"$MECAB/lib"
-
-cat - | mecab -O wakati
diff --git a/spaces/ICML2022/OFA/fairseq/examples/noisychannel/rerank_score_bw.py b/spaces/ICML2022/OFA/fairseq/examples/noisychannel/rerank_score_bw.py
deleted file mode 100644
index b0bc913651bd76667e25c214acb70f2bca19e185..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/noisychannel/rerank_score_bw.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import os
-from contextlib import redirect_stdout
-
-from fairseq import options
-from fairseq_cli import generate
-
-from examples.noisychannel import rerank_options, rerank_utils
-
-
-def score_bw(args):
- if args.backwards1:
- scorer1_src = args.target_lang
- scorer1_tgt = args.source_lang
- else:
- scorer1_src = args.source_lang
- scorer1_tgt = args.target_lang
-
- if args.score_model2 is not None:
- if args.backwards2:
- scorer2_src = args.target_lang
- scorer2_tgt = args.source_lang
- else:
- scorer2_src = args.source_lang
- scorer2_tgt = args.target_lang
-
- rerank1_is_gen = (
- args.gen_model == args.score_model1 and args.source_prefix_frac is None
- )
- rerank2_is_gen = (
- args.gen_model == args.score_model2 and args.source_prefix_frac is None
- )
-
- (
- pre_gen,
- left_to_right_preprocessed_dir,
- right_to_left_preprocessed_dir,
- backwards_preprocessed_dir,
- lm_preprocessed_dir,
- ) = rerank_utils.get_directories(
- args.data_dir_name,
- args.num_rescore,
- args.gen_subset,
- args.gen_model_name,
- args.shard_id,
- args.num_shards,
- args.sampling,
- args.prefix_len,
- args.target_prefix_frac,
- args.source_prefix_frac,
- )
-
- score1_file = rerank_utils.rescore_file_name(
- pre_gen,
- args.prefix_len,
- args.model1_name,
- target_prefix_frac=args.target_prefix_frac,
- source_prefix_frac=args.source_prefix_frac,
- backwards=args.backwards1,
- )
-
- if args.score_model2 is not None:
- score2_file = rerank_utils.rescore_file_name(
- pre_gen,
- args.prefix_len,
- args.model2_name,
- target_prefix_frac=args.target_prefix_frac,
- source_prefix_frac=args.source_prefix_frac,
- backwards=args.backwards2,
- )
-
- if args.right_to_left1:
- rerank_data1 = right_to_left_preprocessed_dir
- elif args.backwards1:
- rerank_data1 = backwards_preprocessed_dir
- else:
- rerank_data1 = left_to_right_preprocessed_dir
-
- gen_param = ["--batch-size", str(128), "--score-reference", "--gen-subset", "train"]
- if not rerank1_is_gen and not os.path.isfile(score1_file):
- print("STEP 4: score the translations for model 1")
-
- model_param1 = [
- "--path",
- args.score_model1,
- "--source-lang",
- scorer1_src,
- "--target-lang",
- scorer1_tgt,
- ]
- gen_model1_param = [rerank_data1] + gen_param + model_param1
-
- gen_parser = options.get_generation_parser()
- input_args = options.parse_args_and_arch(gen_parser, gen_model1_param)
-
- with open(score1_file, "w") as f:
- with redirect_stdout(f):
- generate.main(input_args)
-
- if (
- args.score_model2 is not None
- and not os.path.isfile(score2_file)
- and not rerank2_is_gen
- ):
- print("STEP 4: score the translations for model 2")
-
- if args.right_to_left2:
- rerank_data2 = right_to_left_preprocessed_dir
- elif args.backwards2:
- rerank_data2 = backwards_preprocessed_dir
- else:
- rerank_data2 = left_to_right_preprocessed_dir
-
- model_param2 = [
- "--path",
- args.score_model2,
- "--source-lang",
- scorer2_src,
- "--target-lang",
- scorer2_tgt,
- ]
- gen_model2_param = [rerank_data2] + gen_param + model_param2
-
- gen_parser = options.get_generation_parser()
- input_args = options.parse_args_and_arch(gen_parser, gen_model2_param)
-
- with open(score2_file, "w") as f:
- with redirect_stdout(f):
- generate.main(input_args)
-
-
-def cli_main():
- parser = rerank_options.get_reranking_parser()
- args = options.parse_args_and_arch(parser)
- score_bw(args)
-
-
-if __name__ == "__main__":
- cli_main()
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/data/language_pair_dataset.py b/spaces/ICML2022/OFA/fairseq/fairseq/data/language_pair_dataset.py
deleted file mode 100644
index ff3e14bf14770638524ef6067b558e455dbe5f2b..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/data/language_pair_dataset.py
+++ /dev/null
@@ -1,471 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-
-import numpy as np
-import torch
-from fairseq.data import FairseqDataset, data_utils
-
-
-logger = logging.getLogger(__name__)
-
-
-def collate(
- samples,
- pad_idx,
- eos_idx,
- left_pad_source=True,
- left_pad_target=False,
- input_feeding=True,
- pad_to_length=None,
- pad_to_multiple=1,
-):
- if len(samples) == 0:
- return {}
-
- def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
- return data_utils.collate_tokens(
- [s[key] for s in samples],
- pad_idx,
- eos_idx,
- left_pad,
- move_eos_to_beginning,
- pad_to_length=pad_to_length,
- pad_to_multiple=pad_to_multiple,
- )
-
- def check_alignment(alignment, src_len, tgt_len):
- if alignment is None or len(alignment) == 0:
- return False
- if (
- alignment[:, 0].max().item() >= src_len - 1
- or alignment[:, 1].max().item() >= tgt_len - 1
- ):
- logger.warning("alignment size mismatch found, skipping alignment!")
- return False
- return True
-
- def compute_alignment_weights(alignments):
- """
- Given a tensor of shape [:, 2] containing the source-target indices
- corresponding to the alignments, a weight vector containing the
- inverse frequency of each target index is computed.
- For e.g. if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then
- a tensor containing [1., 0.5, 0.5, 1] should be returned (since target
- index 3 is repeated twice)
- """
- align_tgt = alignments[:, 1]
- _, align_tgt_i, align_tgt_c = torch.unique(
- align_tgt, return_inverse=True, return_counts=True
- )
- align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]]
- return 1.0 / align_weights.float()
-
- id = torch.LongTensor([s["id"] for s in samples])
- src_tokens = merge(
- "source",
- left_pad=left_pad_source,
- pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
- )
- # sort by descending source length
- src_lengths = torch.LongTensor(
- [s["source"].ne(pad_idx).long().sum() for s in samples]
- )
- src_lengths, sort_order = src_lengths.sort(descending=True)
- id = id.index_select(0, sort_order)
- src_tokens = src_tokens.index_select(0, sort_order)
-
- prev_output_tokens = None
- target = None
- if samples[0].get("target", None) is not None:
- target = merge(
- "target",
- left_pad=left_pad_target,
- pad_to_length=pad_to_length["target"]
- if pad_to_length is not None
- else None,
- )
- target = target.index_select(0, sort_order)
- tgt_lengths = torch.LongTensor(
- [s["target"].ne(pad_idx).long().sum() for s in samples]
- ).index_select(0, sort_order)
- ntokens = tgt_lengths.sum().item()
-
- if samples[0].get("prev_output_tokens", None) is not None:
- prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target)
- elif input_feeding:
- # we create a shifted version of targets for feeding the
- # previous output token(s) into the next decoder step
- prev_output_tokens = merge(
- "target",
- left_pad=left_pad_target,
- move_eos_to_beginning=True,
- pad_to_length=pad_to_length["target"]
- if pad_to_length is not None
- else None,
- )
- else:
- ntokens = src_lengths.sum().item()
-
- batch = {
- "id": id,
- "nsentences": len(samples),
- "ntokens": ntokens,
- "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths,},
- "target": target,
- }
- if prev_output_tokens is not None:
- batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select(
- 0, sort_order
- )
-
- if samples[0].get("alignment", None) is not None:
- bsz, tgt_sz = batch["target"].shape
- src_sz = batch["net_input"]["src_tokens"].shape[1]
-
- offsets = torch.zeros((len(sort_order), 2), dtype=torch.long)
- offsets[:, 1] += torch.arange(len(sort_order), dtype=torch.long) * tgt_sz
- if left_pad_source:
- offsets[:, 0] += src_sz - src_lengths
- if left_pad_target:
- offsets[:, 1] += tgt_sz - tgt_lengths
-
- alignments = [
- alignment + offset
- for align_idx, offset, src_len, tgt_len in zip(
- sort_order, offsets, src_lengths, tgt_lengths
- )
- for alignment in [samples[align_idx]["alignment"].view(-1, 2)]
- if check_alignment(alignment, src_len, tgt_len)
- ]
-
- if len(alignments) > 0:
- alignments = torch.cat(alignments, dim=0)
- align_weights = compute_alignment_weights(alignments)
-
- batch["alignments"] = alignments
- batch["align_weights"] = align_weights
-
- if samples[0].get("constraints", None) is not None:
- # Collate the packed constraints across the samples, padding to
- # the length of the longest sample.
- lens = [sample.get("constraints").size(0) for sample in samples]
- max_len = max(lens)
- constraints = torch.zeros((len(samples), max(lens))).long()
- for i, sample in enumerate(samples):
- constraints[i, 0 : lens[i]] = samples[i].get("constraints")
- batch["constraints"] = constraints.index_select(0, sort_order)
-
- return batch
-
-
-class LanguagePairDataset(FairseqDataset):
- """
- A pair of torch.utils.data.Datasets.
-
- Args:
- src (torch.utils.data.Dataset): source dataset to wrap
- src_sizes (List[int]): source sentence lengths
- src_dict (~fairseq.data.Dictionary): source vocabulary
- tgt (torch.utils.data.Dataset, optional): target dataset to wrap
- tgt_sizes (List[int], optional): target sentence lengths
- tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary
- left_pad_source (bool, optional): pad source tensors on the left side
- (default: True).
- left_pad_target (bool, optional): pad target tensors on the left side
- (default: False).
- shuffle (bool, optional): shuffle dataset elements before batching
- (default: True).
- input_feeding (bool, optional): create a shifted version of the targets
- to be passed into the model for teacher forcing (default: True).
- remove_eos_from_source (bool, optional): if set, removes eos from end
- of source if it's present (default: False).
- append_eos_to_target (bool, optional): if set, appends eos to end of
- target if it's absent (default: False).
- align_dataset (torch.utils.data.Dataset, optional): dataset
- containing alignments.
- constraints (Tensor, optional): 2d tensor with a concatenated, zero-
- delimited list of constraints for each sentence.
- append_bos (bool, optional): if set, appends bos to the beginning of
- source/target sentence.
- num_buckets (int, optional): if set to a value greater than 0, then
- batches will be bucketed into the given number of batch shapes.
- src_lang_id (int, optional): source language ID, if set, the collated batch
- will contain a field 'src_lang_id' in 'net_input' which indicates the
- source language of the samples.
- tgt_lang_id (int, optional): target language ID, if set, the collated batch
- will contain a field 'tgt_lang_id' which indicates the target language
- of the samples.
- """
-
- def __init__(
- self,
- src,
- src_sizes,
- src_dict,
- tgt=None,
- tgt_sizes=None,
- tgt_dict=None,
- left_pad_source=True,
- left_pad_target=False,
- shuffle=True,
- input_feeding=True,
- remove_eos_from_source=False,
- append_eos_to_target=False,
- align_dataset=None,
- constraints=None,
- append_bos=False,
- eos=None,
- num_buckets=0,
- src_lang_id=None,
- tgt_lang_id=None,
- pad_to_multiple=1,
- ):
- if tgt_dict is not None:
- assert src_dict.pad() == tgt_dict.pad()
- assert src_dict.eos() == tgt_dict.eos()
- assert src_dict.unk() == tgt_dict.unk()
- if tgt is not None:
- assert len(src) == len(
- tgt
- ), "Source and target must contain the same number of examples"
- self.src = src
- self.tgt = tgt
- self.src_sizes = np.array(src_sizes)
- self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None
- self.sizes = (
- np.vstack((self.src_sizes, self.tgt_sizes)).T
- if self.tgt_sizes is not None
- else self.src_sizes
- )
- self.src_dict = src_dict
- self.tgt_dict = tgt_dict
- self.left_pad_source = left_pad_source
- self.left_pad_target = left_pad_target
- self.shuffle = shuffle
- self.input_feeding = input_feeding
- self.remove_eos_from_source = remove_eos_from_source
- self.append_eos_to_target = append_eos_to_target
- self.align_dataset = align_dataset
- if self.align_dataset is not None:
- assert (
- self.tgt_sizes is not None
- ), "Both source and target needed when alignments are provided"
- self.constraints = constraints
- self.append_bos = append_bos
- self.eos = eos if eos is not None else src_dict.eos()
- self.src_lang_id = src_lang_id
- self.tgt_lang_id = tgt_lang_id
- if num_buckets > 0:
- from fairseq.data import BucketPadLengthDataset
-
- self.src = BucketPadLengthDataset(
- self.src,
- sizes=self.src_sizes,
- num_buckets=num_buckets,
- pad_idx=self.src_dict.pad(),
- left_pad=self.left_pad_source,
- )
- self.src_sizes = self.src.sizes
- logger.info("bucketing source lengths: {}".format(list(self.src.buckets)))
- if self.tgt is not None:
- self.tgt = BucketPadLengthDataset(
- self.tgt,
- sizes=self.tgt_sizes,
- num_buckets=num_buckets,
- pad_idx=self.tgt_dict.pad(),
- left_pad=self.left_pad_target,
- )
- self.tgt_sizes = self.tgt.sizes
- logger.info(
- "bucketing target lengths: {}".format(list(self.tgt.buckets))
- )
-
- # determine bucket sizes using self.num_tokens, which will return
- # the padded lengths (thanks to BucketPadLengthDataset)
- num_tokens = np.vectorize(self.num_tokens, otypes=[np.compat.long])
- self.bucketed_num_tokens = num_tokens(np.arange(len(self.src)))
- self.buckets = [
- (None, num_tokens) for num_tokens in np.unique(self.bucketed_num_tokens)
- ]
- else:
- self.buckets = None
- self.pad_to_multiple = pad_to_multiple
-
- def get_batch_shapes(self):
- return self.buckets
-
- def __getitem__(self, index):
- tgt_item = self.tgt[index] if self.tgt is not None else None
- src_item = self.src[index]
- # Append EOS to end of tgt sentence if it does not have an EOS and remove
- # EOS from end of src sentence if it exists. This is useful when we use
- # use existing datasets for opposite directions i.e., when we want to
- # use tgt_dataset as src_dataset and vice versa
- if self.append_eos_to_target:
- eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
- if self.tgt and self.tgt[index][-1] != eos:
- tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])])
-
- if self.append_bos:
- bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos()
- if self.tgt and self.tgt[index][0] != bos:
- tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]])
-
- bos = self.src_dict.bos()
- if self.src[index][0] != bos:
- src_item = torch.cat([torch.LongTensor([bos]), self.src[index]])
-
- if self.remove_eos_from_source:
- eos = self.src_dict.eos()
- if self.src[index][-1] == eos:
- src_item = self.src[index][:-1]
-
- example = {
- "id": index,
- "source": src_item,
- "target": tgt_item,
- }
- if self.align_dataset is not None:
- example["alignment"] = self.align_dataset[index]
- if self.constraints is not None:
- example["constraints"] = self.constraints[index]
- return example
-
- def __len__(self):
- return len(self.src)
-
- def collater(self, samples, pad_to_length=None):
- """Merge a list of samples to form a mini-batch.
-
- Args:
- samples (List[dict]): samples to collate
- pad_to_length (dict, optional): a dictionary of
- {'source': source_pad_to_length, 'target': target_pad_to_length}
- to indicate the max length to pad to in source and target respectively.
-
- Returns:
- dict: a mini-batch with the following keys:
-
- - `id` (LongTensor): example IDs in the original input order
- - `ntokens` (int): total number of tokens in the batch
- - `net_input` (dict): the input to the Model, containing keys:
-
- - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
- the source sentence of shape `(bsz, src_len)`. Padding will
- appear on the left if *left_pad_source* is ``True``.
- - `src_lengths` (LongTensor): 1D Tensor of the unpadded
- lengths of each source sentence of shape `(bsz)`
- - `prev_output_tokens` (LongTensor): a padded 2D Tensor of
- tokens in the target sentence, shifted right by one
- position for teacher forcing, of shape `(bsz, tgt_len)`.
- This key will not be present if *input_feeding* is
- ``False``. Padding will appear on the left if
- *left_pad_target* is ``True``.
- - `src_lang_id` (LongTensor): a long Tensor which contains source
- language IDs of each sample in the batch
-
- - `target` (LongTensor): a padded 2D Tensor of tokens in the
- target sentence of shape `(bsz, tgt_len)`. Padding will appear
- on the left if *left_pad_target* is ``True``.
- - `tgt_lang_id` (LongTensor): a long Tensor which contains target language
- IDs of each sample in the batch
- """
- res = collate(
- samples,
- pad_idx=self.src_dict.pad(),
- eos_idx=self.eos,
- left_pad_source=self.left_pad_source,
- left_pad_target=self.left_pad_target,
- input_feeding=self.input_feeding,
- pad_to_length=pad_to_length,
- pad_to_multiple=self.pad_to_multiple,
- )
- if self.src_lang_id is not None or self.tgt_lang_id is not None:
- src_tokens = res["net_input"]["src_tokens"]
- bsz = src_tokens.size(0)
- if self.src_lang_id is not None:
- res["net_input"]["src_lang_id"] = (
- torch.LongTensor([[self.src_lang_id]]).expand(bsz, 1).to(src_tokens)
- )
- if self.tgt_lang_id is not None:
- res["tgt_lang_id"] = (
- torch.LongTensor([[self.tgt_lang_id]]).expand(bsz, 1).to(src_tokens)
- )
- return res
-
- def num_tokens(self, index):
- """Return the number of tokens in a sample. This value is used to
- enforce ``--max-tokens`` during batching."""
- return max(
- self.src_sizes[index],
- self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
- )
-
- def num_tokens_vec(self, indices):
- """Return the number of tokens for a set of positions defined by indices.
- This value is used to enforce ``--max-tokens`` during batching."""
- sizes = self.src_sizes[indices]
- if self.tgt_sizes is not None:
- sizes = np.maximum(sizes, self.tgt_sizes[indices])
- return sizes
-
- def size(self, index):
- """Return an example's size as a float or tuple. This value is used when
- filtering a dataset with ``--max-positions``."""
- return (
- self.src_sizes[index],
- self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
- )
-
- def ordered_indices(self):
- """Return an ordered list of indices. Batches will be constructed based
- on this order."""
- if self.shuffle:
- indices = np.random.permutation(len(self)).astype(np.int64)
- else:
- indices = np.arange(len(self), dtype=np.int64)
- if self.buckets is None:
- # sort by target length, then source length
- if self.tgt_sizes is not None:
- indices = indices[np.argsort(self.tgt_sizes[indices], kind="mergesort")]
- return indices[np.argsort(self.src_sizes[indices], kind="mergesort")]
- else:
- # sort by bucketed_num_tokens, which is:
- # max(padded_src_len, padded_tgt_len)
- return indices[
- np.argsort(self.bucketed_num_tokens[indices], kind="mergesort")
- ]
-
- @property
- def supports_prefetch(self):
- return getattr(self.src, "supports_prefetch", False) and (
- getattr(self.tgt, "supports_prefetch", False) or self.tgt is None
- )
-
- def prefetch(self, indices):
- self.src.prefetch(indices)
- if self.tgt is not None:
- self.tgt.prefetch(indices)
- if self.align_dataset is not None:
- self.align_dataset.prefetch(indices)
-
- def filter_indices_by_size(self, indices, max_sizes):
- """Filter a list of sample indices. Remove those that are longer
- than specified in max_sizes.
-
- Args:
- indices (np.array): original array of sample indices
- max_sizes (int or list[int] or tuple[int]): max sample size,
- can be defined separately for src and tgt (then list or tuple)
-
- Returns:
- np.array: filtered sample array
- list: list of removed indices
- """
- return data_utils.filter_paired_dataset_indices_by_size(
- self.src_sizes, self.tgt_sizes, indices, max_sizes,
- )
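The num_tokens/ordered_indices pair above exists so that batching code can sort samples by length and pack them under a --max-tokens budget. A simplified sketch of that packing idea (this is not fairseq's actual batch_by_size; the function name and sizes below are illustrative):

import numpy as np

def batch_by_max_tokens(lengths, max_tokens):
    # Sort indices by sample length, then close a batch whenever adding the
    # next sample would push (batch size * longest sample) over the budget.
    order = np.argsort(lengths, kind="mergesort")
    batches, current, longest = [], [], 0
    for idx in order:
        longest = max(longest, lengths[idx])
        if current and (len(current) + 1) * longest > max_tokens:
            batches.append(current)
            current, longest = [], lengths[idx]
        current.append(int(idx))
    if current:
        batches.append(current)
    return batches

# batch_by_max_tokens([5, 3, 9, 4, 8, 2], max_tokens=16) -> [[5, 1, 3], [0, 4], [2]]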
diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/bin/debug/analyze_overlapping_masks.sh b/spaces/InpaintAI/Inpaint-Anything/third_party/lama/bin/debug/analyze_overlapping_masks.sh
deleted file mode 100644
index 4a4727b0129007d9b0eed3fc25780adb565965a2..0000000000000000000000000000000000000000
--- a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/bin/debug/analyze_overlapping_masks.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-BASEDIR="$(dirname $0)"
-
-# paths are valid for mml7
-
-# select images
-#ls /data/inpainting/work/data/train | shuf | head -2000 | xargs -n1 -I{} cp {} /data/inpainting/mask_analysis/src
-
-# generate masks
-#"$BASEDIR/../gen_debug_mask_dataset.py" \
-# "$BASEDIR/../../configs/debug_mask_gen.yaml" \
-# "/data/inpainting/mask_analysis/src" \
-# "/data/inpainting/mask_analysis/generated"
-
-# predict
-#"$BASEDIR/../predict.py" \
-# model.path="simple_pix2pix2_gap_sdpl_novgg_large_b18_ffc075_batch8x15/saved_checkpoint/r.suvorov_2021-04-30_14-41-12_train_simple_pix2pix2_gap_sdpl_novgg_large_b18_ffc075_batch8x15_epoch22-step-574999" \
-# indir="/data/inpainting/mask_analysis/generated" \
-# outdir="/data/inpainting/mask_analysis/predicted" \
-# dataset.img_suffix=.jpg \
-# +out_ext=.jpg
-
-# analyze good and bad samples
-"$BASEDIR/../analyze_errors.py" \
- --only-report \
- --n-jobs 8 \
- "$BASEDIR/../../configs/analyze_mask_errors.yaml" \
- "/data/inpainting/mask_analysis/small/generated" \
- "/data/inpainting/mask_analysis/small/predicted" \
- "/data/inpainting/mask_analysis/small/report"
diff --git a/spaces/JunchuanYu/SegRS/segment_anything/utils/amg.py b/spaces/JunchuanYu/SegRS/segment_anything/utils/amg.py
deleted file mode 100644
index 3a137778e45c464c079658ecb87ec53270e789f7..0000000000000000000000000000000000000000
--- a/spaces/JunchuanYu/SegRS/segment_anything/utils/amg.py
+++ /dev/null
@@ -1,346 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import numpy as np
-import torch
-
-import math
-from copy import deepcopy
-from itertools import product
-from typing import Any, Dict, Generator, ItemsView, List, Tuple
-
-
-class MaskData:
- """
- A structure for storing masks and their related data in batched format.
- Implements basic filtering and concatenation.
- """
-
- def __init__(self, **kwargs) -> None:
- for v in kwargs.values():
- assert isinstance(
- v, (list, np.ndarray, torch.Tensor)
- ), "MaskData only supports list, numpy arrays, and torch tensors."
- self._stats = dict(**kwargs)
-
- def __setitem__(self, key: str, item: Any) -> None:
- assert isinstance(
- item, (list, np.ndarray, torch.Tensor)
- ), "MaskData only supports list, numpy arrays, and torch tensors."
- self._stats[key] = item
-
- def __delitem__(self, key: str) -> None:
- del self._stats[key]
-
- def __getitem__(self, key: str) -> Any:
- return self._stats[key]
-
- def items(self) -> ItemsView[str, Any]:
- return self._stats.items()
-
- def filter(self, keep: torch.Tensor) -> None:
- for k, v in self._stats.items():
- if v is None:
- self._stats[k] = None
- elif isinstance(v, torch.Tensor):
- self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
- elif isinstance(v, np.ndarray):
- self._stats[k] = v[keep.detach().cpu().numpy()]
- elif isinstance(v, list) and keep.dtype == torch.bool:
- self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
- elif isinstance(v, list):
- self._stats[k] = [v[i] for i in keep]
- else:
- raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
-
- def cat(self, new_stats: "MaskData") -> None:
- for k, v in new_stats.items():
- if k not in self._stats or self._stats[k] is None:
- self._stats[k] = deepcopy(v)
- elif isinstance(v, torch.Tensor):
- self._stats[k] = torch.cat([self._stats[k], v], dim=0)
- elif isinstance(v, np.ndarray):
- self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
- elif isinstance(v, list):
- self._stats[k] = self._stats[k] + deepcopy(v)
- else:
- raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
-
- def to_numpy(self) -> None:
- for k, v in self._stats.items():
- if isinstance(v, torch.Tensor):
- self._stats[k] = v.detach().cpu().numpy()
-
-
-def is_box_near_crop_edge(
- boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
-) -> torch.Tensor:
- """Filter masks at the edge of a crop, but not at the edge of the original image."""
- crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
- orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
- boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
- near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
- near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
- near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
- return torch.any(near_crop_edge, dim=1)
-
-
-def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
- box_xywh = deepcopy(box_xyxy)
- box_xywh[2] = box_xywh[2] - box_xywh[0]
- box_xywh[3] = box_xywh[3] - box_xywh[1]
- return box_xywh
-
-
-def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
- assert len(args) > 0 and all(
- len(a) == len(args[0]) for a in args
- ), "Batched iteration must have inputs of all the same size."
- n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
- for b in range(n_batches):
- yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
-
-
-def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
- """
- Encodes masks to an uncompressed RLE, in the format expected by
- pycoco tools.
- """
- # Put in fortran order and flatten h,w
- b, h, w = tensor.shape
- tensor = tensor.permute(0, 2, 1).flatten(1)
-
- # Compute change indices
- diff = tensor[:, 1:] ^ tensor[:, :-1]
- change_indices = diff.nonzero()
-
- # Encode run length
- out = []
- for i in range(b):
- cur_idxs = change_indices[change_indices[:, 0] == i, 1]
- cur_idxs = torch.cat(
- [
- torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
- cur_idxs + 1,
- torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
- ]
- )
- btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
- counts = [] if tensor[i, 0] == 0 else [0]
- counts.extend(btw_idxs.detach().cpu().tolist())
- out.append({"size": [h, w], "counts": counts})
- return out
-
-
-def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
- """Compute a binary mask from an uncompressed RLE."""
- h, w = rle["size"]
- mask = np.empty(h * w, dtype=bool)
- idx = 0
- parity = False
- for count in rle["counts"]:
- mask[idx : idx + count] = parity
- idx += count
- parity ^= True
- mask = mask.reshape(w, h)
- return mask.transpose() # Put in C order
-
-
-def area_from_rle(rle: Dict[str, Any]) -> int:
- return sum(rle["counts"][1::2])
-
-
-def calculate_stability_score(
- masks: torch.Tensor, mask_threshold: float, threshold_offset: float
-) -> torch.Tensor:
- """
- Computes the stability score for a batch of masks. The stability
- score is the IoU between the binary masks obtained by thresholding
- the predicted mask logits at high and low values.
- """
- # One mask is always contained inside the other.
- # Save memory by preventing unnecessary cast to torch.int64
- intersections = (
- (masks > (mask_threshold + threshold_offset))
- .sum(-1, dtype=torch.int16)
- .sum(-1, dtype=torch.int32)
- )
- unions = (
- (masks > (mask_threshold - threshold_offset))
- .sum(-1, dtype=torch.int16)
- .sum(-1, dtype=torch.int32)
- )
- return intersections / unions
-
-
-def build_point_grid(n_per_side: int) -> np.ndarray:
- """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
- offset = 1 / (2 * n_per_side)
- points_one_side = np.linspace(offset, 1 - offset, n_per_side)
- points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
- points_y = np.tile(points_one_side[:, None], (1, n_per_side))
- points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
- return points
-
-
-def build_all_layer_point_grids(
- n_per_side: int, n_layers: int, scale_per_layer: int
-) -> List[np.ndarray]:
- """Generates point grids for all crop layers."""
- points_by_layer = []
- for i in range(n_layers + 1):
- n_points = int(n_per_side / (scale_per_layer**i))
- points_by_layer.append(build_point_grid(n_points))
- return points_by_layer
-
-
-def generate_crop_boxes(
- im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
-) -> Tuple[List[List[int]], List[int]]:
- """
- Generates a list of crop boxes of different sizes. Each layer
- has (2**i)**2 boxes for the ith layer.
- """
- crop_boxes, layer_idxs = [], []
- im_h, im_w = im_size
- short_side = min(im_h, im_w)
-
- # Original image
- crop_boxes.append([0, 0, im_w, im_h])
- layer_idxs.append(0)
-
- def crop_len(orig_len, n_crops, overlap):
- return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))
-
- for i_layer in range(n_layers):
- n_crops_per_side = 2 ** (i_layer + 1)
- overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
-
- crop_w = crop_len(im_w, n_crops_per_side, overlap)
- crop_h = crop_len(im_h, n_crops_per_side, overlap)
-
- crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
- crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
-
- # Crops in XYWH format
- for x0, y0 in product(crop_box_x0, crop_box_y0):
- box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
- crop_boxes.append(box)
- layer_idxs.append(i_layer + 1)
-
- return crop_boxes, layer_idxs
-
-
-def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
- x0, y0, _, _ = crop_box
- offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
- # Check if boxes has a channel dimension
- if len(boxes.shape) == 3:
- offset = offset.unsqueeze(1)
- return boxes + offset
-
-
-def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
- x0, y0, _, _ = crop_box
- offset = torch.tensor([[x0, y0]], device=points.device)
- # Check if points has a channel dimension
- if len(points.shape) == 3:
- offset = offset.unsqueeze(1)
- return points + offset
-
-
-def uncrop_masks(
- masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int
-) -> torch.Tensor:
- x0, y0, x1, y1 = crop_box
- if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
- return masks
- # Coordinate transform masks
- pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
- pad = (x0, pad_x - x0, y0, pad_y - y0)
- return torch.nn.functional.pad(masks, pad, value=0)
-
-
-def remove_small_regions(
- mask: np.ndarray, area_thresh: float, mode: str
-) -> Tuple[np.ndarray, bool]:
- """
- Removes small disconnected regions and holes in a mask. Returns the
- mask and an indicator of whether the mask has been modified.
- """
- import cv2 # type: ignore
-
- assert mode in ["holes", "islands"]
- correct_holes = mode == "holes"
- working_mask = (correct_holes ^ mask).astype(np.uint8)
- n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
- sizes = stats[:, -1][1:] # Row 0 is background label
- small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
- if len(small_regions) == 0:
- return mask, False
- fill_labels = [0] + small_regions
- if not correct_holes:
- fill_labels = [i for i in range(n_labels) if i not in fill_labels]
- # If every region is below threshold, keep largest
- if len(fill_labels) == 0:
- fill_labels = [int(np.argmax(sizes)) + 1]
- mask = np.isin(regions, fill_labels)
- return mask, True
-
-
-def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
- from pycocotools import mask as mask_utils # type: ignore
-
- h, w = uncompressed_rle["size"]
- rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
- rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json
- return rle
-
-
-def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
- """
- Calculates boxes in XYXY format around masks. Returns [0,0,0,0] for
- an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
- """
- # torch.max below raises an error on empty inputs, just skip in this case
- if torch.numel(masks) == 0:
- return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
-
- # Normalize shape to CxHxW
- shape = masks.shape
- h, w = shape[-2:]
- if len(shape) > 2:
- masks = masks.flatten(0, -3)
- else:
- masks = masks.unsqueeze(0)
-
- # Get top and bottom edges
- in_height, _ = torch.max(masks, dim=-1)
- in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
- bottom_edges, _ = torch.max(in_height_coords, dim=-1)
- in_height_coords = in_height_coords + h * (~in_height)
- top_edges, _ = torch.min(in_height_coords, dim=-1)
-
- # Get left and right edges
- in_width, _ = torch.max(masks, dim=-2)
- in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
- right_edges, _ = torch.max(in_width_coords, dim=-1)
- in_width_coords = in_width_coords + w * (~in_width)
- left_edges, _ = torch.min(in_width_coords, dim=-1)
-
- # If the mask is empty the right edge will be to the left of the left edge.
- # Replace these boxes with [0, 0, 0, 0]
- empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
- out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
- out = out * (~empty_filter).unsqueeze(-1)
-
- # Return to original shape
- if len(shape) > 2:
- out = out.reshape(*shape[:-2], 4)
- else:
- out = out[0]
-
- return out
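mask_to_rle_pytorch and rle_to_mask above use an uncompressed RLE: the mask is flattened in Fortran (column-major) order and counts alternates runs of 0s and 1s, always starting with the 0-run (hence the leading 0 when the first pixel is set). A small round-trip check, assuming the two functions are importable (the module path here is illustrative):

import numpy as np
import torch
from amg import mask_to_rle_pytorch, rle_to_mask  # assumed import path

mask = torch.zeros(1, 4, 5, dtype=torch.bool)
mask[0, 1:3, 2:4] = True                       # a 2x2 blob in a 4x5 mask

rle = mask_to_rle_pytorch(mask)[0]             # {'size': [4, 5], 'counts': [...]}
restored = rle_to_mask(rle)                    # np.ndarray of shape (4, 5)
assert np.array_equal(restored, mask[0].numpy())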
diff --git a/spaces/Justin-Choo/Lemon_WEB_UI/app.py b/spaces/Justin-Choo/Lemon_WEB_UI/app.py
deleted file mode 100644
index ca8bed6e2dffb239c72a70a035ed537ebffe6446..0000000000000000000000000000000000000000
--- a/spaces/Justin-Choo/Lemon_WEB_UI/app.py
+++ /dev/null
@@ -1,149 +0,0 @@
-import os
-from sys import executable as pyexecutable
-import subprocess
-import pathlib
-import gc
-
-def Gitclone(URI:str,ClonePath:str = "") -> int :
- if(ClonePath == "") :
- while True:
- i=subprocess.run([r"git",r"clone",URI])
- if(i.returncode == 0 ):
- del i
- gc.collect()
- return 0
- else :
- del i
- else:
- while True:
- i=subprocess.run([r"git",r"clone",URI,ClonePath])
- if(i.returncode == 0 ):
- del i
- gc.collect()
- return 0
- else :
- del i
-def DownLoad(URI:str,DownloadPath:str,DownLoadFileName:str ) -> int:
- while (True):
- i=subprocess.run([r"aria2c",r"-c",r"-x" ,r"16", r"-s",r"16", r"-k" ,r"1M" ,r"-m",r"0",r"--enable-mmap=false",r"--console-log-level=error",r"-d",DownloadPath,r"-o",DownLoadFileName,URI]);
- if(i.returncode == 0 ):
- del i
- gc.collect()
- return 0
- else :
- del i
-user_home =pathlib.Path.home().resolve()
-os.chdir(str(user_home))
-#clone stable-diffusion-webui repo
-print("cloning stable-diffusion-webui repo")
-Gitclone(r"https://github.com/AUTOMATIC1111/stable-diffusion-webui.git",str(user_home / r"stable-diffusion-webui"))
-os.chdir(str(user_home / r"stable-diffusion-webui"))
-os.system("git reset --hard 89f9faa63388756314e8a1d96cf86bf5e0663045")
-#
-
-#install extensions
-print("installing extensions")
-Gitclone(r"https://huggingface.co/embed/negative",str(user_home / r"stable-diffusion-webui" / r"embeddings" / r"negative"))
-Gitclone(r"https://huggingface.co/embed/lora",str(user_home / r"stable-diffusion-webui" / r"models" / r"Lora" / r"positive"))
-DownLoad(r"https://huggingface.co/embed/upscale/resolve/main/4x-UltraSharp.pth",str(user_home / r"stable-diffusion-webui" / r"models" / r"ESRGAN") ,r"4x-UltraSharp.pth")
-while True:
- if(subprocess.run([r"wget",r"https://raw.githubusercontent.com/camenduru/stable-diffusion-webui-scripts/main/run_n_times.py",r"-O",str(user_home / r"stable-diffusion-webui" / r"scripts" / r"run_n_times.py")]).returncode == 0):
- break
-Gitclone(r"https://github.com/deforum-art/deforum-for-automatic1111-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"deforum-for-automatic1111-webui" ))
-#Gitclone(r"https://github.com/AlUlkesh/stable-diffusion-webui-images-browser",str(user_home / r"stable-diffusion-webui" / r"extensions"/ r"stable-diffusion-webui-images-browser"))
-Gitclone(r"https://github.com/camenduru/stable-diffusion-webui-huggingface",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-huggingface"))
-Gitclone(r"https://github.com/camenduru/sd-civitai-browser",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-civitai-browser"))
-Gitclone(r"https://github.com/kohya-ss/sd-webui-additional-networks",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks"))
-Gitclone(r"https://github.com/Mikubill/sd-webui-controlnet",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-controlnet"))
-Gitclone(r"https://github.com/fkunn1326/openpose-editor",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"openpose-editor"))
-Gitclone(r"https://github.com/jexom/sd-webui-depth-lib",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-depth-lib"))
-Gitclone(r"https://github.com/hnmr293/posex",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"posex"))
-Gitclone(r"https://github.com/nonnonstop/sd-webui-3d-open-pose-editor",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-3d-open-pose-editor"))
-# For Chinese localization, uncomment the next line
-#Gitclone(r"https://github.com/dtlnor/stable-diffusion-webui-localization-zh_CN.git",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-localization-zh_CN"))
-Gitclone(r"https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git" , str(user_home / r"stable-diffusion-webui" / r"extensions" / r"a1111-sd-webui-tagcomplete"))
-Gitclone(r"https://github.com/camenduru/sd-webui-tunnels",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-tunnels"))
-Gitclone(r"https://github.com/etherealxx/batchlinks-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"batchlinks-webui"))
-Gitclone(r"https://github.com/catppuccin/stable-diffusion-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-catppuccin"))
-
-#Gitclone(r"https://github.com/KohakuBueleaf/a1111-sd-webui-locon",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"a1111-sd-webui-locon" ))
-Gitclone(r"https://github.com/AUTOMATIC1111/stable-diffusion-webui-rembg",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-rembg"))
-Gitclone(r"https://github.com/ashen-sensored/stable-diffusion-webui-two-shot",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-two-shot"))
-Gitclone(r"https://github.com/camenduru/sd_webui_stealth_pnginfo",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd_webui_stealth_pnginfo"))
-
-os.chdir(user_home / r"stable-diffusion-webui")
-
-#download ControlNet models
-print("extensions dolwnload done .\ndownloading ControlNet models")
-dList =[r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11e_sd15_ip2p_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11e_sd15_shuffle_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_canny_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_inpaint_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_mlsd_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_normalbae_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_openpose_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_scribble_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_seg_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_softedge_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11e_sd15_ip2p_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11e_sd15_shuffle_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_canny_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11f1p_sd15_depth_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_inpaint_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_lineart_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_mlsd_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_normalbae_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_openpose_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_scribble_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_seg_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_softedge_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15s2_lineart_anime_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11f1e_sd15_tile_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_style_sd14v1.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_sketch_sd14v1.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_seg_sd14v1.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_openpose_sd14v1.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_keypose_sd14v1.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_depth_sd14v1.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_canny_sd14v1.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_canny_sd15v2.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_depth_sd15v2.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_sketch_sd15v2.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_zoedepth_sd15v1.pth"]
-for i in range(0,len(dList)): DownLoad(dList[i],str(user_home / "stable-diffusion-webui" / "extensions" / "sd-webui-controlnet" / "models"),pathlib.Path(dList[i]).name)
-del dList
-
-#download model
-#you can change model download address here
-print("ControlNet models download done.\ndownloading model")
-DownLoad(r"https://huggingface.co/iZELX1/Grapefruit/resolve/main/lemon.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"lemon.safetensors")
-
-#DownLoad(r"https://huggingface.co/ckpt/anything-v4.0/resolve/main/anything-v4.5-pruned.ckpt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"anything-v4.5-pruned.ckpt")
-#DownLoad(r"https://huggingface.co/ckpt/anything-v4.0/resolve/main/anything-v4.0.vae.pt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"anything-v4.0.vae.pt")
-#DownLoad(r"https://huggingface.co/gsdf/Counterfeit-V3.0/resolve/main/Counterfeit-V3.0_fp16.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"Counterfeit-V3.0_fp16.safetensors")
-#DownLoad(r"https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A1B_orangemixs.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"AOM3A1B_orangemixs.safetensors")
-#DownLoad(r"https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/VAEs/orangemix.vae.pt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"orangemix.vae.pt")
-#DownLoad(r"https://huggingface.co/Meina/MeinaPastel/resolve/main/MeinaPastelV5%20-%20Baked%20VAE.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"MeinaPastelV5_BakedVAE.safetensors")
-#DownLoad(r"https://huggingface.co/Meina/MeinaPastel/resolve/main/MeinaPastelV5%20-%20Without%20VAE.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"MeinaPastelV5_WithoutVAE.safetensors")
-#DownLoad(r"https://civitai.com/api/download/models/9474",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"chilloutmix_NiPrunedFp16.safetensors")
-
-DownLoad(r"https://civitai.com/api/download/models/39885",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"Better_light.safetensors")
-DownLoad(r"https://civitai.com/api/download/models/21065",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"LAS.safetensors")
-DownLoad(r"https://civitai.com/api/download/models/39164",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"backlighting.safetensors")
-# start webui
-
-print("Done\nStarting Webui...")
-os.chdir(user_home / r"stable-diffusion-webui")
-while True:
- ret=subprocess.run([r"python3" ,r"launch.py",r"--precision",r"full",r"--no-half",r"--no-half-vae",r"--enable-insecure-extension-access",r"--medvram",r"--skip-torch-cuda-test",r"--enable-console-prompts",r"--ui-settings-file="+str(pathlib.Path(__file__).parent /r"config.json")])
- if(ret.returncode == 0 ):
- del ret
- gc.collect()
- else :
- del ret
-
-del os ,user_home ,pyexecutable ,subprocess
\ No newline at end of file
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/extract_locale.py b/spaces/Kangarroar/ApplioRVC-Inference/extract_locale.py
deleted file mode 100644
index a4ff5ea3ddd7c612c640544099ab98a861b8fe35..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/extract_locale.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import json
-import re
-
-# Define regular expression patterns
-pattern = r"""i18n\([\s\n\t]*(["'][^"']+["'])[\s\n\t]*\)"""
-
-# Initialize the dictionary to store key-value pairs
-data = {}
-
-
-def process(fn: str):
- global data
- with open(fn, "r", encoding="utf-8") as f:
- contents = f.read()
- matches = re.findall(pattern, contents)
- for key in matches:
- key = eval(key)
- print("extract:", key)
- data[key] = key
-
-
-print("processing infer-web.py")
-process("infer-web.py")
-
-print("processing gui_v0.py")
-process("gui_v0.py")
-
-print("processing gui_v1.py")
-process("gui_v1.py")
-
-# Save as a JSON file
-with open("./i18n/en_US.json", "w", encoding="utf-8") as f:
- json.dump(data, f, ensure_ascii=False, indent=4)
- f.write("\n")
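The regular expression above captures the quoted literal inside each i18n(...) call, and eval() then turns that captured source text (quotes included) into the plain key. A quick self-contained check of that behaviour:

import re

pattern = r"""i18n\([\s\n\t]*(["'][^"']+["'])[\s\n\t]*\)"""
source = '''gr.Button(i18n("Convert")) and i18n( 'Clear audio' )'''

matches = re.findall(pattern, source)
print(matches)                        # ['"Convert"', "'Clear audio'"]
print([eval(m) for m in matches])     # ['Convert', 'Clear audio']

ast.literal_eval would be a safer drop-in for eval here, since the captured text is always a plain string literal.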
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/layers_537227KB.py b/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/layers_537227KB.py
deleted file mode 100644
index a38b7bb3ae3136b07eadfc2db445fef4c2de186b..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/layers_537227KB.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import torch
-from torch import nn
-import torch.nn.functional as F
-
-from . import spec_utils
-
-
-class Conv2DBNActiv(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
- super(Conv2DBNActiv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(
- nin,
- nout,
- kernel_size=ksize,
- stride=stride,
- padding=pad,
- dilation=dilation,
- bias=False,
- ),
- nn.BatchNorm2d(nout),
- activ(),
- )
-
- def __call__(self, x):
- return self.conv(x)
-
-
-class SeperableConv2DBNActiv(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
- super(SeperableConv2DBNActiv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(
- nin,
- nin,
- kernel_size=ksize,
- stride=stride,
- padding=pad,
- dilation=dilation,
- groups=nin,
- bias=False,
- ),
- nn.Conv2d(nin, nout, kernel_size=1, bias=False),
- nn.BatchNorm2d(nout),
- activ(),
- )
-
- def __call__(self, x):
- return self.conv(x)
-
-
-class Encoder(nn.Module):
- def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
- super(Encoder, self).__init__()
- self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
- self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
-
- def __call__(self, x):
- skip = self.conv1(x)
- h = self.conv2(skip)
-
- return h, skip
-
-
-class Decoder(nn.Module):
- def __init__(
- self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
- ):
- super(Decoder, self).__init__()
- self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
- self.dropout = nn.Dropout2d(0.1) if dropout else None
-
- def __call__(self, x, skip=None):
- x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
- if skip is not None:
- skip = spec_utils.crop_center(skip, x)
- x = torch.cat([x, skip], dim=1)
- h = self.conv(x)
-
- if self.dropout is not None:
- h = self.dropout(h)
-
- return h
-
-
-class ASPPModule(nn.Module):
- def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
- super(ASPPModule, self).__init__()
- self.conv1 = nn.Sequential(
- nn.AdaptiveAvgPool2d((1, None)),
- Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
- )
- self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
- self.conv3 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
- )
- self.conv4 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
- )
- self.conv5 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
- )
- self.conv6 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
- )
- self.conv7 = SeperableConv2DBNActiv(
- nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
- )
- self.bottleneck = nn.Sequential(
- Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
- )
-
- def forward(self, x):
- _, _, h, w = x.size()
- feat1 = F.interpolate(
- self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
- )
- feat2 = self.conv2(x)
- feat3 = self.conv3(x)
- feat4 = self.conv4(x)
- feat5 = self.conv5(x)
- feat6 = self.conv6(x)
- feat7 = self.conv7(x)
- out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
- bottle = self.bottleneck(out)
- return bottle
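SeperableConv2DBNActiv above factorises a k x k convolution into a depthwise k x k convolution (groups=nin) followed by a 1 x 1 pointwise convolution, which is what keeps the parameter count of this large layer stack manageable. A standalone sketch of the saving in plain torch (channel sizes are arbitrary, chosen only for the comparison):

import torch.nn as nn

nin, nout, k = 64, 128, 3

full = nn.Conv2d(nin, nout, kernel_size=k, padding=1, bias=False)
separable = nn.Sequential(
    nn.Conv2d(nin, nin, kernel_size=k, padding=1, groups=nin, bias=False),  # depthwise
    nn.Conv2d(nin, nout, kernel_size=1, bias=False),                        # pointwise
)

def n_params(m):
    return sum(p.numel() for p in m.parameters())

print(n_params(full))       # 73728  (64 * 128 * 3 * 3)
print(n_params(separable))  # 8768   (64 * 3 * 3 + 64 * 128)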
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/__init__.py b/spaces/KyanChen/RSPrompter/mmdet/models/backbones/__init__.py
deleted file mode 100644
index e16ff85f7037b36fb2046fcbcd3af523050a6516..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/__init__.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from .csp_darknet import CSPDarknet
-from .cspnext import CSPNeXt
-from .darknet import Darknet
-from .detectors_resnet import DetectoRS_ResNet
-from .detectors_resnext import DetectoRS_ResNeXt
-from .efficientnet import EfficientNet
-from .hourglass import HourglassNet
-from .hrnet import HRNet
-from .mobilenet_v2 import MobileNetV2
-from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2
-from .regnet import RegNet
-from .res2net import Res2Net
-from .resnest import ResNeSt
-from .resnet import ResNet, ResNetV1d
-from .resnext import ResNeXt
-from .ssd_vgg import SSDVGG
-from .swin import SwinTransformer
-from .trident_resnet import TridentResNet
-
-__all__ = [
- 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',
- 'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',
- 'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',
- 'SwinTransformer', 'PyramidVisionTransformer',
- 'PyramidVisionTransformerV2', 'EfficientNet', 'CSPNeXt'
-]
diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/modules/F0Predictor/PMF0Predictor.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/modules/F0Predictor/PMF0Predictor.py
deleted file mode 100644
index 29b2d78eec2b4de5e617a21120abd5fb5a716ee5..0000000000000000000000000000000000000000
--- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/modules/F0Predictor/PMF0Predictor.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from lib.infer.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
-import parselmouth
-import numpy as np
-
-
-class PMF0Predictor(F0Predictor):
- def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
- self.hop_length = hop_length
- self.f0_min = f0_min
- self.f0_max = f0_max
- self.sampling_rate = sampling_rate
-
- def interpolate_f0(self, f0):
- """
- Interpolate the F0 sequence over unvoiced frames.
- """
-
- data = np.reshape(f0, (f0.size, 1))
-
- vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
- vuv_vector[data > 0.0] = 1.0
- vuv_vector[data <= 0.0] = 0.0
-
- ip_data = data
-
- frame_number = data.size
- last_value = 0.0
- for i in range(frame_number):
- if data[i] <= 0.0:
- j = i + 1
- for j in range(i + 1, frame_number):
- if data[j] > 0.0:
- break
- if j < frame_number - 1:
- if last_value > 0.0:
- step = (data[j] - data[i - 1]) / float(j - i)
- for k in range(i, j):
- ip_data[k] = data[i - 1] + step * (k - i + 1)
- else:
- for k in range(i, j):
- ip_data[k] = data[j]
- else:
- for k in range(i, frame_number):
- ip_data[k] = last_value
- else:
- ip_data[i] = data[i]  # this copy may be unnecessary
- last_value = data[i]
-
- return ip_data[:, 0], vuv_vector[:, 0]
-
- def compute_f0(self, wav, p_len=None):
- x = wav
- if p_len is None:
- p_len = x.shape[0] // self.hop_length
- else:
- assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
- time_step = self.hop_length / self.sampling_rate * 1000
- f0 = (
- parselmouth.Sound(x, self.sampling_rate)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=self.f0_min,
- pitch_ceiling=self.f0_max,
- )
- .selected_array["frequency"]
- )
-
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
- f0, uv = self.interpolate_f0(f0)
- return f0
-
- def compute_f0_uv(self, wav, p_len=None):
- x = wav
- if p_len is None:
- p_len = x.shape[0] // self.hop_length
- else:
- assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
- time_step = self.hop_length / self.sampling_rate * 1000
- f0 = (
- parselmouth.Sound(x, self.sampling_rate)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=self.f0_min,
- pitch_ceiling=self.f0_max,
- )
- .selected_array["frequency"]
- )
-
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
- f0, uv = self.interpolate_f0(f0)
- return f0, uv
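compute_f0_uv above returns the Praat pitch track, interpolated over unvoiced frames, together with a per-frame voiced/unvoiced flag. A rough usage sketch, assuming parselmouth is installed and the module is importable from the path in the diff header; the synthetic sine wave only stands in for real audio:

import numpy as np
from lib.infer.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor

sr = 44100
t = np.arange(sr) / sr                          # one second of audio
wav = 0.5 * np.sin(2 * np.pi * 220.0 * t)       # 220 Hz test tone

predictor = PMF0Predictor(hop_length=512, f0_min=50, f0_max=1100, sampling_rate=sr)
f0, uv = predictor.compute_f0_uv(wav)

print(f0.shape, uv.shape)                  # (86,) (86,): one value per hop
print(float(np.median(f0[uv > 0])))        # roughly 220.0 for the voiced frames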
diff --git a/spaces/Lianjd/stock_dashboard/backtrader/analyzers/calmar.py b/spaces/Lianjd/stock_dashboard/backtrader/analyzers/calmar.py
deleted file mode 100644
index ec4849285c3fb1ee5ccad6ffd220787108482150..0000000000000000000000000000000000000000
--- a/spaces/Lianjd/stock_dashboard/backtrader/analyzers/calmar.py
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8; py-indent-offset:4 -*-
-###############################################################################
-#
-# Copyright (C) 2015-2020 Daniel Rodriguez
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-###############################################################################
-from __future__ import (absolute_import, division, print_function,
- unicode_literals)
-
-import backtrader as bt
-from . import TimeDrawDown
-
-
-__all__ = ['Calmar']
-
-
-class Calmar(bt.TimeFrameAnalyzerBase):
- '''This analyzer calculates the Calmar ratio on a given timeframe, which
- can be different from the timeframe used in the underlying data.
- Params:
-
- - ``timeframe`` (default: ``TimeFrame.Months``)
- If ``None`` the ``timeframe`` of the 1st data in the system will be
- used
-
- Pass ``TimeFrame.NoTimeFrame`` to consider the entire dataset with no
- time constraints
-
- - ``compression`` (default: ``None``)
-
- Only used for sub-day timeframes, for example to work on an hourly
- timeframe by specifying "TimeFrame.Minutes" and 60 as compression
-
- If ``None`` then the compression of the 1st data of the system will be
- used
- - *None*
-
- - ``fund`` (default: ``None``)
-
- If ``None`` the actual mode of the broker (fundmode - True/False) will
- be autodetected to decide if the returns are based on the total net
- asset value or on the fund value. See ``set_fundmode`` in the broker
- documentation
-
- Set it to ``True`` or ``False`` for a specific behavior
-
- See also:
-
- - https://en.wikipedia.org/wiki/Calmar_ratio
-
- Methods:
- - ``get_analysis``
-
- Returns a OrderedDict with a key for the time period and the
- corresponding rolling Calmar ratio
-
- Attributes:
- - ``calmar`` the latest calculated calmar ratio
- '''
-
- packages = ('collections', 'math',)
-
- params = (
- ('timeframe', bt.TimeFrame.Months), # default in calmar
- ('period', 36),
- ('fund', None),
- )
-
- def __init__(self):
- self._maxdd = TimeDrawDown(timeframe=self.p.timeframe,
- compression=self.p.compression)
-
- def start(self):
- self._mdd = float('-inf')
- self._values = collections.deque([float('Nan')] * self.p.period,
- maxlen=self.p.period)
- if self.p.fund is None:
- self._fundmode = self.strategy.broker.fundmode
- else:
- self._fundmode = self.p.fund
-
- if not self._fundmode:
- self._values.append(self.strategy.broker.getvalue())
- else:
- self._values.append(self.strategy.broker.fundvalue)
-
- def on_dt_over(self):
- self._mdd = max(self._mdd, self._maxdd.maxdd)
- if not self._fundmode:
- self._values.append(self.strategy.broker.getvalue())
- else:
- self._values.append(self.strategy.broker.fundvalue)
- rann = math.log(self._values[-1] / self._values[0]) / len(self._values)
- self.calmar = calmar = rann / (self._mdd or float('Inf'))
-
- self.rets[self.dtkey] = calmar
-
- def stop(self):
- self.on_dt_over() # update last values
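on_dt_over above computes the ratio as the per-period log return of the tracked values divided by the largest drawdown seen so far. The same arithmetic on a plain list of numbers (the account values and the drawdown figure are made up, and the drawdown is written as a fraction purely for illustration):

import math

values = [100_000, 102_000, 99_500, 104_000, 107_500]   # rolling window of account values
max_dd = 0.025                                          # worst drawdown over that window

rann = math.log(values[-1] / values[0]) / len(values)   # per-period log return
calmar = rann / (max_dd or float('inf'))                # guard against a zero drawdown
print(round(calmar, 3))                                 # ~0.579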
diff --git a/spaces/Libra7578/Image-to-video/README.md b/spaces/Libra7578/Image-to-video/README.md
deleted file mode 100644
index 17b1d6148c5d9f28e1aa878788a4ab39a2e0b3e9..0000000000000000000000000000000000000000
--- a/spaces/Libra7578/Image-to-video/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Image To Video
-emoji: 🎬
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
-license: other
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/LightChen2333/OpenSLU/model/decoder/agif_decoder.py b/spaces/LightChen2333/OpenSLU/model/decoder/agif_decoder.py
deleted file mode 100644
index a99ad55ae3bda76cd858f8567d48dcd5f41d9946..0000000000000000000000000000000000000000
--- a/spaces/LightChen2333/OpenSLU/model/decoder/agif_decoder.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from common.utils import HiddenData, OutputData
-from model.decoder.base_decoder import BaseDecoder
-
-
-class AGIFDecoder(BaseDecoder):
- def forward(self, hidden: HiddenData, **kwargs):
- # hidden = self.interaction(hidden)
- pred_intent = self.intent_classifier(hidden)
- intent_index = self.intent_classifier.decode(OutputData(pred_intent, None),
- return_list=False,
- return_sentence_level=True)
- interact_args = {"intent_index": intent_index,
- "batch_size": pred_intent.classifier_output.shape[0],
- "intent_label_num": self.intent_classifier.config["intent_label_num"]}
- pred_slot = self.slot_classifier(hidden, internal_interaction=self.interaction, **interact_args)
- return OutputData(pred_intent, pred_slot)
diff --git a/spaces/LightSY/W2L-TD/facelib/face_api.py b/spaces/LightSY/W2L-TD/facelib/face_api.py
deleted file mode 100644
index d8b103b107ca580d9dcbc4947c21883dff51bc7f..0000000000000000000000000000000000000000
--- a/spaces/LightSY/W2L-TD/facelib/face_api.py
+++ /dev/null
@@ -1,173 +0,0 @@
-import numpy as np
-
-
-# batched detection
-from PIL import Image
-import cv2
-
-def batched_transform(self, frames, use_origin_size):
- """
- Arguments:
- frames: a list of PIL.Image, or torch.Tensor(shape=[n, h, w, c],
- type=np.float32, BGR format).
- use_origin_size: whether to use origin size.
- """
- from_PIL = True if isinstance(frames[0], Image.Image) else False
-
- # convert to opencv format
- if from_PIL:
- frames = [cv2.cvtColor(np.asarray(frame), cv2.COLOR_RGB2BGR) for frame in frames]
- frames = np.asarray(frames, dtype=np.float32)
-
- # testing scale
- im_size_min = np.min(frames[0].shape[0:2])
- im_size_max = np.max(frames[0].shape[0:2])
- resize = float(self.target_size) / float(im_size_min)
-
- # prevent bigger axis from being more than max_size
- if np.round(resize * im_size_max) > self.max_size:
- resize = float(self.max_size) / float(im_size_max)
- resize = 1 if use_origin_size else resize
-
- # resize
- if resize != 1:
- if not from_PIL:
- frames = F.interpolate(frames, scale_factor=resize)
- else:
- frames = [
- cv2.resize(frame, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
- for frame in frames
- ]
-
- # convert to torch.tensor format
- # if not from_PIL:
- # frames = frames.transpose(1, 2).transpose(1, 3).contiguous()
- # else:
- # frames = frames.transpose((0, 3, 1, 2))
- # frames = torch.from_numpy(frames)
- frames = frames.transpose((0, 3, 1, 2))
- # frames = torch.from_numpy(frames)
-
- return frames, resize
-def __detect_faces(inputs):
- # get scale
- height, width = inputs.shape[2:]
- # self.scale = torch.tensor([width, height, width, height], dtype=torch.float32).to(device)
- scale = np.array([width, height, width, height], dtype=np.float32)
- tmp = [width, height, width, height, width, height, width, height, width, height]
- # self.scale1 = torch.tensor(tmp, dtype=torch.float32).to(device)
- scale1 = np.array(tmp, dtype=np.float32)
-
- # forward
- # inputs = inputs.to(device)
- inputs = inputs
- # if self.half_inference:
- # inputs = inputs.half()
- loc, conf, landmarks = self(inputs)
-
- # get priorbox
- priorbox = PriorBox(self.cfg, image_size=inputs.shape[2:])
- # priors = priorbox.forward().to(device)
- priors = priorbox.forward()
-
- return loc, conf, landmarks, priors
-
-# def batch_detect(net, imgs, device):
-def batch_detect(frames, conf_threshold = 0.8, nms_threshold = 0.4, use_origin_size = True):
-
- frames, resize = batched_transform(frames, use_origin_size)
- frames = frames
- frames = frames - np.array([104, 117, 123])
-
- b_loc, b_conf, b_landmarks, priors = self.__detect_faces(frames)
-
- final_bounding_boxes, final_landmarks = [], []
-
- # decode
- priors = priors.unsqueeze(0)
- b_loc = batched_decode(b_loc, priors, self.cfg['variance']) * self.scale / self.resize
- # b_landmarks = batched_decode_landm(b_landmarks, priors, self.cfg['variance']) * self.scale1 / self.resize
- b_conf = b_conf[:, :, 1]
-
- # index for selection
- b_indice = b_conf > conf_threshold
-
- # concat
- b_loc_and_conf = torch.cat((b_loc, b_conf.unsqueeze(-1)), dim=2).float()
-
- for pred, landm, inds in zip(b_loc_and_conf, b_landmarks, b_indice):
-
- # ignore low scores
- # pred, landm = pred[inds, :], landm[inds, :]
- pred = pred[inds, :]
- if pred.shape[0] == 0:
- final_bounding_boxes.append(np.array([], dtype=np.float32))
- # final_landmarks.append(np.array([], dtype=np.float32))
- continue
-
- # to CPU
- # bounding_boxes, landm = pred.cpu().numpy(), landm.cpu().numpy()  # original version
- # bounding_boxes, landm = pred.cpu().detach().numpy(), landm.cpu().detach().numpy()
- bounding_boxes = pred.cpu().detach().numpy()
-
- # NMS
- keep = py_cpu_nms(bounding_boxes, nms_threshold)
- # bounding_boxes, landmarks = bounding_boxes[keep, :], landm[keep]
- bounding_boxes = bounding_boxes[keep, :]
-
- # append
- d = bounding_boxes[0]
- d = np.clip(d, 0, None)
- x1, y1, x2, y2 = map(int, d[:-1])
- final_bounding_boxes.append((x1, y1, x2, y2))
- # final_bounding_boxes.append(bounding_boxes)
- # final_landmarks.append(landmarks)
- # self.t['forward_pass'].toc(average=True)
- # self.batch_time += self.t['forward_pass'].diff
- # self.total_frame += len(frames)
- # print(self.batch_time / self.total_frame)
-
- return final_bounding_boxes
-
-
- imgs = imgs - np.array([104, 117, 123])
- imgs = imgs.transpose(0, 3, 1, 2)
- imgs = np.array(imgs, dtype=np.float32)
- # if 'cuda' in device:
- # torch.backends.cudnn.benchmark = True
-
- # imgs = torch.from_numpy(imgs).float().to(device)
- BB, CC, HH, WW = imgs.shape
- # with torch.no_grad():
- # olist = net(imgs)
- olist = net.run(None, {'img': imgs})
-
- bboxlist = []
- for i in range(len(olist) // 2):
- olist[i * 2] = softmax(olist[i * 2], axis=1)
- # olist = [oelem.data.cpu() for oelem in olist]
- for i in range(len(olist) // 2):
- ocls, oreg = olist[i * 2], olist[i * 2 + 1]
- # FB, FC, FH, FW = ocls.size() # feature map size
- FB, FC, FH, FW = ocls.shape
- stride = 2**(i + 2) # 4,8,16,32,64,128
- anchor = stride * 4
- poss = zip(*np.where(ocls[:, 1, :, :] > 0.05))
- for Iindex, hindex, windex in poss:
- axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride
- score = ocls[:, 1, hindex, windex]
- loc = oreg[:, :, hindex, windex].reshape(BB, 1, 4)
- priors = np.array([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]])
- priors = priors.reshape(1, 1, 4)
- # priors = torch.Tensor([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]]).view(1, 1, 4)
- variances = [0.1, 0.2]
- box = batch_decode(loc, priors, variances)
- box = box[:, 0] * 1.0
- # cv2.rectangle(imgshow,(int(x1),int(y1)),(int(x2),int(y2)),(0,0,255),1)
- score = np.expand_dims(score,axis=1)
- bboxlist.append(np.concatenate([box, score], 1))
- bboxlist = np.array(bboxlist)
- if 0 == len(bboxlist):
- bboxlist = np.zeros((1, BB, 5))
-
- return bboxlist
\ No newline at end of file
diff --git a/spaces/Luelll/ChuanhuChatGPT/locale/extract_locale.py b/spaces/Luelll/ChuanhuChatGPT/locale/extract_locale.py
deleted file mode 100644
index 32b0924bd6dffe150cb3e481ddadef836b91b83c..0000000000000000000000000000000000000000
--- a/spaces/Luelll/ChuanhuChatGPT/locale/extract_locale.py
+++ /dev/null
@@ -1,26 +0,0 @@
-import os
-import json
-import re
-
-# Define regular expression patterns
-pattern = r'i18n\((\"{3}.*?\"{3}|\".*?\")\)'
-
-# Load the .py file
-with open('ChuanhuChatbot.py', 'r', encoding='utf-8') as f:
- contents = f.read()
-
-# Load the .py files in the modules folder
-for filename in os.listdir("modules"):
- if filename.endswith(".py"):
- with open(os.path.join("modules", filename), "r", encoding="utf-8") as f:
- contents += f.read()
-
-# Matching with regular expressions
-matches = re.findall(pattern, contents, re.DOTALL)
-
-# Convert to key/value pairs
-data = {match.strip('()"'): '' for match in matches}
-
-# Save as a JSON file
-with open('labels.json', 'w', encoding='utf-8') as f:
- json.dump(data, f, ensure_ascii=False, indent=4)
\ No newline at end of file
diff --git a/spaces/MLVKU/Human_Object_Interaction/configs/hico_train.sh b/spaces/MLVKU/Human_Object_Interaction/configs/hico_train.sh
deleted file mode 100644
index c3b9794be0f3ceb12d4d0e1185074b57aaba8a49..0000000000000000000000000000000000000000
--- a/spaces/MLVKU/Human_Object_Interaction/configs/hico_train.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-
-set -x
-
-EXP_DIR=logs_run_001
-PY_ARGS=${@:1}
-
-python -u main.py \
- --project_name CPC_HOTR_HICODET \
- --run_name ${EXP_DIR} \
- --HOIDet \
- --validate \
- --share_enc \
- --pretrained_dec \
- --use_consis \
- --share_dec_param \
- --epochs 90 \
- --lr_drop 60 \
- --lr 1e-4 \
- --lr_backbone 1e-5 \
- --ramp_up_epoch 30 \
- --path_id 0 \
- --num_hoi_queries 16 \
- --set_cost_idx 20 \
- --hoi_idx_loss_coef 1 \
- --hoi_act_loss_coef 10 \
- --backbone resnet50 \
- --hoi_consistency_loss_coef 0.5 \
- --hoi_idx_consistency_loss_coef 1 \
- --hoi_act_consistency_loss_coef 2 \
- --stop_grad_stage \
- --hoi_eos_coef 0.1 \
- --temperature 0.2 \
- --no_aux_loss \
- --hoi_aux_loss \
- --dataset_file hico-det \
- --data_path hico_20160224_det \
- --frozen_weights https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth \
- --output_dir checkpoints/hico_det/ \
- --augpath_name [\'p2\',\'p3\',\'p4\'] \
- ${PY_ARGS}
\ No newline at end of file
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/configs/_base_/schedules/schedule_80k.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/configs/_base_/schedules/schedule_80k.py
deleted file mode 100644
index c190cee6bdc7922b688ea75dc8f152fa15c24617..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/configs/_base_/schedules/schedule_80k.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# optimizer
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
-optimizer_config = dict()
-# learning policy
-lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
-# runtime settings
-runner = dict(type='IterBasedRunner', max_iters=80000)
-checkpoint_config = dict(by_epoch=False, interval=8000)
-evaluation = dict(interval=8000, metric='mIoU')
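The 'poly' policy configured above decays the learning rate from lr toward min_lr with a (1 - progress) ** power coefficient, stepped per iteration because by_epoch=False. A small sketch of that curve (the interpolation form mirrors mmcv's poly hook; treat it as an approximation rather than the hook's exact code):

def poly_lr(step, base_lr=0.01, min_lr=1e-4, max_iters=80000, power=0.9):
    # Interpolate from base_lr down to min_lr along a (1 - progress)^power curve.
    coeff = (1 - step / max_iters) ** power
    return (base_lr - min_lr) * coeff + min_lr

for step in (0, 20_000, 40_000, 79_999):
    print(step, round(poly_lr(step), 6))
# 0 -> 0.01, 20000 -> ~0.007742, 40000 -> ~0.005405, 79999 -> ~0.0001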
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/cc_attention.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/cc_attention.py
deleted file mode 100644
index 9207aa95e6730bd9b3362dee612059a5f0ce1c5e..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/cc_attention.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from annotator.uniformer.mmcv.cnn import PLUGIN_LAYERS, Scale
-
-
-def NEG_INF_DIAG(n, device):
- """Returns a diagonal matrix of size [n, n].
-
- The diagonal entries are all "-inf". This avoids counting the
- overlapping element in the Criss-Cross attention twice.
- """
- return torch.diag(torch.tensor(float('-inf')).to(device).repeat(n), 0)
-
-
-@PLUGIN_LAYERS.register_module()
-class CrissCrossAttention(nn.Module):
- """Criss-Cross Attention Module.
-
- .. note::
- Before v1.3.13, we use a CUDA op. Since v1.3.13, we switch
- to a pure PyTorch and equivalent implementation. For more
- details, please refer to https://github.com/open-mmlab/mmcv/pull/1201.
-
- Speed comparison for one forward pass
-
- - Input size: [2,512,97,97]
- - Device: 1 NVIDIA GeForce RTX 2080 Ti
-
- +-----------------------+---------------+------------+---------------+
- | |PyTorch version|CUDA version|Relative speed |
- +=======================+===============+============+===============+
- |with torch.no_grad() |0.00554402 s |0.0299619 s |5.4x |
- +-----------------------+---------------+------------+---------------+
- |no with torch.no_grad()|0.00562803 s |0.0301349 s |5.4x |
- +-----------------------+---------------+------------+---------------+
-
- Args:
- in_channels (int): Channels of the input feature map.
- """
-
- def __init__(self, in_channels):
- super().__init__()
- self.query_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
- self.key_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
- self.value_conv = nn.Conv2d(in_channels, in_channels, 1)
- self.gamma = Scale(0.)
- self.in_channels = in_channels
-
- def forward(self, x):
- """forward function of Criss-Cross Attention.
-
- Args:
- x (Tensor): Input feature. \
- shape (batch_size, in_channels, height, width)
- Returns:
- Tensor: Output of the layer, with shape of \
- (batch_size, in_channels, height, width)
- """
- B, C, H, W = x.size()
- query = self.query_conv(x)
- key = self.key_conv(x)
- value = self.value_conv(x)
- energy_H = torch.einsum('bchw,bciw->bwhi', query, key) + NEG_INF_DIAG(
- H, query.device)
- energy_H = energy_H.transpose(1, 2)
- energy_W = torch.einsum('bchw,bchj->bhwj', query, key)
- attn = F.softmax(
- torch.cat([energy_H, energy_W], dim=-1), dim=-1) # [B,H,W,(H+W)]
- out = torch.einsum('bciw,bhwi->bchw', value, attn[..., :H])
- out += torch.einsum('bchj,bhwj->bchw', value, attn[..., H:])
-
- out = self.gamma(out) + x
- out = out.contiguous()
-
- return out
-
- def __repr__(self):
- s = self.__class__.__name__
- s += f'(in_channels={self.in_channels})'
- return s
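The two einsum calls above give each position an attention distribution over its own row and column only, H + W keys instead of H * W, which is where the speedup in the table comes from. A shape-level sketch of that energy computation in plain torch, outside the mmcv plugin machinery (the NEG_INF_DIAG correction against double-counting the centre element is omitted here):

import torch
import torch.nn.functional as F

B, C, H, W = 2, 8, 5, 7
query = torch.randn(B, C, H, W)
key = torch.randn(B, C, H, W)

# column-wise (same w, all heights) and row-wise (same h, all widths) energies
energy_H = torch.einsum('bchw,bciw->bwhi', query, key).transpose(1, 2)  # (B, H, W, H)
energy_W = torch.einsum('bchw,bchj->bhwj', query, key)                  # (B, H, W, W)

attn = F.softmax(torch.cat([energy_H, energy_W], dim=-1), dim=-1)
print(attn.shape)   # torch.Size([2, 5, 7, 12]): each position attends to H + W = 12 keys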
diff --git a/spaces/MetaWabbit/Auto-GPT/autogpt/config/singleton.py b/spaces/MetaWabbit/Auto-GPT/autogpt/config/singleton.py
deleted file mode 100644
index 55b2aeea120bbe51ca837265fcb7fbff467e55f2..0000000000000000000000000000000000000000
--- a/spaces/MetaWabbit/Auto-GPT/autogpt/config/singleton.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""The singleton metaclass for ensuring only one instance of a class."""
-import abc
-
-
-class Singleton(abc.ABCMeta, type):
- """
- Singleton metaclass for ensuring only one instance of a class.
- """
-
- _instances = {}
-
- def __call__(cls, *args, **kwargs):
- """Call method for the singleton metaclass."""
- if cls not in cls._instances:
- cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
- return cls._instances[cls]
-
-
-class AbstractSingleton(abc.ABC, metaclass=Singleton):
- """
- Abstract singleton class for ensuring only one instance of a class.
- """
-
- pass
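Because __call__ caches the first instance of each class, any later construction returns that same object and skips __init__. A quick illustration against the metaclass above (the Settings subclass is just an example, not part of Auto-GPT):

class Settings(metaclass=Singleton):
    def __init__(self, debug=False):
        self.debug = debug

a = Settings(debug=True)
b = Settings(debug=False)   # arguments are ignored once an instance exists

print(a is b)    # True
print(b.debug)   # True: still the first instance's state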
diff --git a/spaces/MetaWabbit/Auto-GPT/tests/unit/test_commands.py b/spaces/MetaWabbit/Auto-GPT/tests/unit/test_commands.py
deleted file mode 100644
index ecbac9b73bd9ad872931d77e144dd853b3d8ef64..0000000000000000000000000000000000000000
--- a/spaces/MetaWabbit/Auto-GPT/tests/unit/test_commands.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""Unit tests for the commands module"""
-from unittest.mock import MagicMock, patch
-
-import pytest
-
-import autogpt.agent.agent_manager as agent_manager
-from autogpt.app import execute_command, list_agents, start_agent
-
-
-@pytest.mark.integration_test
-def test_make_agent() -> None:
- """Test the make_agent command"""
- with patch("openai.ChatCompletion.create") as mock:
- obj = MagicMock()
- obj.response.choices[0].messages[0].content = "Test message"
- mock.return_value = obj
- start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2")
- agents = list_agents()
- assert "List of agents:\n0: chat" == agents
- start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2")
- agents = list_agents()
- assert "List of agents:\n0: chat\n1: write" == agents
diff --git a/spaces/MohammedAlakhras/Telegram_API/app.py b/spaces/MohammedAlakhras/Telegram_API/app.py
deleted file mode 100644
index 139f1b8166a34939a01e92d5b6148364b006622b..0000000000000000000000000000000000000000
--- a/spaces/MohammedAlakhras/Telegram_API/app.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import gradio as gr
-from telethon.sync import TelegramClient, events
-import datetime
-import socks
-import time
-import os
-import requests
-import random
-
-
-
-
-
-
-
-
-
-def download_file(url, filename):
- response = requests.get(url)
- with open(filename, 'wb') as file:
- file.write(response.content)
-
-
-RndMssgs = [
- "I've received your message. I'll get back to you as soon as possible.",
- "I appreciate your patience and understanding.",
- "Your message has been received. I'll respond shortly.",
- "I'm currently unavailable, but I'll reply as soon as I can.",
- "Your message has been received and it's important to us.",
- "I'm away at the moment. Rest assured, I'll respond to your message soon.",
- "Your message is received. Thank you for your understanding.",
- "I've received your message and will respond at my earliest convenience.",
- "I've received your message. I'll make sure to reply as soon as possible.",
- "I'm currently out offline, but I'll get back to you shortly.",
- "I've received your message. I'll get back to you soon.",
- "I'm not available right now, but I've received your message.",
- "Thank you for contacting me. I'll respond as soon as I can.",
- "I'm currently busy, but I'll make sure to reply to your message as soon as possible.",
- "Your message has been received. I'll get back to you shortly.",
- "Thank you for your patience. I'll respond as soon as I can.",
- "I've received your message and will get back to you shortly.",
- "Your message has been received. Thank you for waiting.",
- "I'm currently on a break, but I've received your message and will respond soon."
-]
-
-
-
-
-
-proxy_server = '142.93.68.63'
-proxy_port = 2434
-proxy_secret = 'ee32b920dffb51643028e2f6b878d4eac1666172616b61762e636f6d'
-proxy_dc_id = 2 # This is usually 2 for MTProto proxies
-
-proxy = (
- socks.SOCKS5,
- proxy_server,
- proxy_port,
- True,
- 'vpn',
- 'unlimited'
-)
-
-api_id=os.environ['apiID']
-api_hash=os.environ['apiHash']
-phone=os.environ['phone']
-username=os.environ['username']
-
-sessionFile=os.environ['sessionUrlFile']
-
-download_file(sessionFile, 'anon.session')
-
-
-
-# Dictionary to track the times when senders were last replied to
-reply_times = {}
-
-async def main():
- async with TelegramClient('anon', api_id, api_hash) as client:
- @client.on(events.NewMessage())
- async def my_event_handler(event):
- sender = await event.get_sender()
- sender_id = sender.id
- sender_name = sender.first_name
- chat = await event.get_chat()
- chat_id = chat.id
- text = event.raw_text
-
- # Personal message
- if chat_id == sender_id and not sender.bot:
- # Check the last reply to this sender
- last_reply_time = reply_times.get(str(sender_id), None)
- if last_reply_time is None or time.time() - last_reply_time > 60*60*6: # reply only if not replied within the last 6 hours
- response = f'Hello {sender_name} 👋🏻 ,\n {random.choice(RndMssgs)} 😊'
- await client.send_message(chat_id, response, parse_mode='HTML')
- reply_times[str(sender_id)] = time.time() # update the last reply time
-
- # Group message
- elif username in text:
- last_reply_time = reply_times.get(str(str(chat_id)+str(sender_id)), None)
- if last_reply_time is None or time.time() - last_reply_time > 60*5:
- response = f'Hello {sender_name} @ {chat.title} 👋🏻,\n {random.choice(RndMssgs)} 😊'
- await client.send_message(chat_id, response, parse_mode='HTML')
- reply_times[str(str(chat_id)+str(sender_id))] = time.time()
-
-
- await client.run_until_disconnected()
-
-
-# Gradio Interface
-inputs = []
-output = "text"
-gr.Interface(fn=main, inputs=inputs, outputs=output).launch()
-
- # client.loop.run_until_complete(main())
\ No newline at end of file
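The handler above throttles auto-replies with the `reply_times` dict: one reply per sender every 6 hours for direct messages, and one per sender-in-chat every 5 minutes for group mentions. A standalone sketch of that rate-limit check; the `should_reply` helper is hypothetical and not part of the app:

```python
import time

reply_times = {}  # key -> unix timestamp of the last auto-reply

def should_reply(key, window_seconds):
    """Return True and record the time if no reply was sent for `key` within the window."""
    now = time.time()
    last = reply_times.get(key)
    if last is None or now - last > window_seconds:
        reply_times[key] = now
        return True
    return False

# Same windows as the handler above: 6 h for direct messages, 5 min for group mentions.
print(should_reply("user:42", 60 * 60 * 6))   # True  -> send the canned reply
print(should_reply("user:42", 60 * 60 * 6))   # False -> throttled
```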
diff --git a/spaces/MultiAgentSystems/WhisperGPTMultiAgentSystems/README.md b/spaces/MultiAgentSystems/WhisperGPTMultiAgentSystems/README.md
deleted file mode 100644
index e6ea0c7481d86560e929561bb6afe8e9217cabb8..0000000000000000000000000000000000000000
--- a/spaces/MultiAgentSystems/WhisperGPTMultiAgentSystems/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: WhisperGPTMultiAgentSystems
-emoji: 📚
-colorFrom: green
-colorTo: indigo
-sdk: streamlit
-sdk_version: 1.28.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/NAACL2022/CLIP-Caption-Reward/captioning/models/ShowTellModel.py b/spaces/NAACL2022/CLIP-Caption-Reward/captioning/models/ShowTellModel.py
deleted file mode 100644
index 2f3463b64f988aa61d90838ddcf8ac89053c3377..0000000000000000000000000000000000000000
--- a/spaces/NAACL2022/CLIP-Caption-Reward/captioning/models/ShowTellModel.py
+++ /dev/null
@@ -1,174 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.autograd import *
-from . import utils
-
-from .CaptionModel import CaptionModel
-
-class ShowTellModel(CaptionModel):
- def __init__(self, opt):
- super(ShowTellModel, self).__init__()
- self.vocab_size = opt.vocab_size
- self.input_encoding_size = opt.input_encoding_size
- self.rnn_type = opt.rnn_type
- self.rnn_size = opt.rnn_size
- self.num_layers = opt.num_layers
- self.drop_prob_lm = opt.drop_prob_lm
- self.seq_length = opt.seq_length
- self.fc_feat_size = opt.fc_feat_size
-
- self.ss_prob = 0.0 # Schedule sampling probability
-
- self.img_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
- self.core = getattr(nn, self.rnn_type.upper())(self.input_encoding_size, self.rnn_size, self.num_layers, bias=False, dropout=self.drop_prob_lm)
- self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
- self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
- self.dropout = nn.Dropout(self.drop_prob_lm)
-
- self.init_weights()
-
- def init_weights(self):
- initrange = 0.1
- self.embed.weight.data.uniform_(-initrange, initrange)
- self.logit.bias.data.fill_(0)
- self.logit.weight.data.uniform_(-initrange, initrange)
-
- def init_hidden(self, bsz):
- weight = self.logit.weight
- if self.rnn_type == 'lstm':
- return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
- weight.new_zeros(self.num_layers, bsz, self.rnn_size))
- else:
- return weight.new_zeros(self.num_layers, bsz, self.rnn_size)
-
- def _forward(self, fc_feats, att_feats, seq, att_masks=None):
- batch_size = fc_feats.size(0)
- seq_per_img = seq.shape[0] // batch_size
- state = self.init_hidden(batch_size*seq_per_img)
- outputs = []
-
- if seq_per_img > 1:
- fc_feats = utils.repeat_tensors(seq_per_img, fc_feats)
-
- for i in range(seq.size(1) + 1):
- if i == 0:
- xt = self.img_embed(fc_feats)
- else:
- if self.training and i >= 2 and self.ss_prob > 0.0: # otherwise no need to sample
- sample_prob = fc_feats.data.new(batch_size*seq_per_img).uniform_(0, 1)
- sample_mask = sample_prob < self.ss_prob
- if sample_mask.sum() == 0:
- it = seq[:, i-1].clone()
- else:
- sample_ind = sample_mask.nonzero().view(-1)
- it = seq[:, i-1].data.clone()
- #prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
- #it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
- prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
- it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
- else:
- it = seq[:, i-1].clone()
- # break if all the sequences end
- if i >= 2 and seq[:, i-1].data.sum() == 0:
- break
- xt = self.embed(it)
-
- output, state = self.core(xt.unsqueeze(0), state)
- output = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1)
- outputs.append(output)
-
- return torch.cat([_.unsqueeze(1) for _ in outputs[1:]], 1).contiguous()
-
- def get_logprobs_state(self, it, state):
- # 'it' contains a word index
- xt = self.embed(it)
-
- output, state = self.core(xt.unsqueeze(0), state)
- logprobs = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1)
-
- return logprobs, state
-
- def _sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
- beam_size = opt.get('beam_size', 10)
- batch_size = fc_feats.size(0)
-
- assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
- seq = torch.LongTensor(self.seq_length, batch_size).zero_()
- seqLogprobs = torch.FloatTensor(self.seq_length, batch_size)
- # lets process every image independently for now, for simplicity
-
- self.done_beams = [[] for _ in range(batch_size)]
- for k in range(batch_size):
- state = self.init_hidden(beam_size)
- for t in range(2):
- if t == 0:
- xt = self.img_embed(fc_feats[k:k+1]).expand(beam_size, self.input_encoding_size)
- elif t == 1: # input
- it = fc_feats.data.new(beam_size).long().zero_()
- xt = self.embed(it)
-
- output, state = self.core(xt.unsqueeze(0), state)
- logprobs = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1)
-
- self.done_beams[k] = self.beam_search(state, logprobs, opt=opt)
- seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
- seqLogprobs[:, k] = self.done_beams[k][0]['logps']
- # return the samples and their log likelihoods
- return seq.transpose(0, 1), seqLogprobs.transpose(0, 1)
-
- def _sample(self, fc_feats, att_feats, att_masks=None, opt={}):
- sample_method = opt.get('sample_method', 'greedy')
- beam_size = opt.get('beam_size', 1)
- temperature = opt.get('temperature', 1.0)
- if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
- return self.sample_beam(fc_feats, att_feats, opt)
-
- batch_size = fc_feats.size(0)
- state = self.init_hidden(batch_size)
- seq = fc_feats.new_zeros(batch_size, self.seq_length, dtype=torch.long)
- seqLogprobs = fc_feats.new_zeros(batch_size, self.seq_length)
- for t in range(self.seq_length + 2):
- if t == 0:
- xt = self.img_embed(fc_feats)
- else:
- if t == 1: # input
- it = fc_feats.data.new(batch_size).long().zero_()
- xt = self.embed(it)
-
- output, state = self.core(xt.unsqueeze(0), state)
- logprobs = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1)
-
- # sample the next word
- if t == self.seq_length + 1: # skip if we achieve maximum length
- break
- if sample_method == 'greedy':
- sampleLogprobs, it = torch.max(logprobs.data, 1)
- it = it.view(-1).long()
- else:
- if temperature == 1.0:
- prob_prev = torch.exp(logprobs.data).cpu() # fetch prev distribution: shape Nx(M+1)
- else:
- # scale logprobs by temperature
- prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu()
- it = torch.multinomial(prob_prev, 1).to(logprobs.device)
- sampleLogprobs = logprobs.gather(1, it) # gather the logprobs at sampled positions
- it = it.view(-1).long() # and flatten indices for downstream processing
-
- if t >= 1:
- # stop when all finished
- if t == 1:
- unfinished = it > 0
- else:
- unfinished = unfinished & (it > 0)
- it = it * unfinished.type_as(it)
- seq[:,t-1] = it #seq[t] the input of t+2 time step
- seqLogprobs[:,t-1] = sampleLogprobs.view(-1)
- if unfinished.sum() == 0:
- break
-
- return seq, seqLogprobs
\ No newline at end of file
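The `_forward` loop above mixes ground-truth tokens with the model's own samples when `ss_prob > 0` (scheduled sampling). A toy-sized sketch of just that step, with made-up shapes and random log-probabilities standing in for `outputs[-1]`:

```python
import torch

# Toy shapes: batch of 4, vocabulary of 10 (index 0 plays the role of the end token).
ss_prob = 0.25
ground_truth = torch.randint(1, 10, (4,))                     # seq[:, i-1] in the model above
prev_logprobs = torch.log_softmax(torch.randn(4, 10), dim=1)  # stand-in for outputs[-1]

sample_prob = torch.rand(4)
sample_mask = sample_prob < ss_prob        # which positions feed back the model's own sample
it = ground_truth.clone()
if sample_mask.any():
    sample_ind = sample_mask.nonzero().view(-1)
    prob_prev = prev_logprobs.exp()        # back to probabilities
    sampled = torch.multinomial(prob_prev, 1).view(-1)
    it.index_copy_(0, sample_ind, sampled.index_select(0, sample_ind))

print(it)  # mix of ground-truth tokens and model samples, as in _forward
```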
diff --git a/spaces/NATSpeech/PortaSpeech/utils/text/text_encoder.py b/spaces/NATSpeech/PortaSpeech/utils/text/text_encoder.py
deleted file mode 100644
index 09555af09720382a795712f0fdd9b711c5b19e02..0000000000000000000000000000000000000000
--- a/spaces/NATSpeech/PortaSpeech/utils/text/text_encoder.py
+++ /dev/null
@@ -1,263 +0,0 @@
-import json
-import re
-import six
-from six.moves import range # pylint: disable=redefined-builtin
-
-PAD = ""
-EOS = ""
-UNK = ""
-SEG = "|"
-PUNCS = '!,.?;:'
-RESERVED_TOKENS = [PAD, EOS, UNK]
-NUM_RESERVED_TOKENS = len(RESERVED_TOKENS)
-PAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0
-EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1
-UNK_ID = RESERVED_TOKENS.index(UNK) # Normally 2
-
-if six.PY2:
- RESERVED_TOKENS_BYTES = RESERVED_TOKENS
-else:
- RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")]
-
-# Regular expression for unescaping token strings.
-# '\u' is converted to '_'
-# '\\' is converted to '\'
-# '\213;' is converted to unichr(213)
-_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
-_ESCAPE_CHARS = set(u"\\_u;0123456789")
-
-
-def strip_ids(ids, ids_to_strip):
- """Strip ids_to_strip from the end ids."""
- ids = list(ids)
- while ids and ids[-1] in ids_to_strip:
- ids.pop()
- return ids
-
-
-class TextEncoder(object):
- """Base class for converting from ints to/from human readable strings."""
-
- def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS):
- self._num_reserved_ids = num_reserved_ids
-
- @property
- def num_reserved_ids(self):
- return self._num_reserved_ids
-
- def encode(self, s):
- """Transform a human-readable string into a sequence of int ids.
-
- The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
- num_reserved_ids) are reserved.
-
- EOS is not appended.
-
- Args:
- s: human-readable string to be converted.
-
- Returns:
- ids: list of integers
- """
- return [int(w) + self._num_reserved_ids for w in s.split()]
-
- def decode(self, ids, strip_extraneous=False):
- """Transform a sequence of int ids into a human-readable string.
-
- EOS is not expected in ids.
-
- Args:
- ids: list of integers to be converted.
- strip_extraneous: bool, whether to strip off extraneous tokens
- (EOS and PAD).
-
- Returns:
- s: human-readable string.
- """
- if strip_extraneous:
- ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
- return " ".join(self.decode_list(ids))
-
- def decode_list(self, ids):
- """Transform a sequence of int ids into a their string versions.
-
- This method supports transforming individual input/output ids to their
- string versions so that sequence to/from text conversions can be visualized
- in a human readable format.
-
- Args:
- ids: list of integers to be converted.
-
- Returns:
- strs: list of human-readable string.
- """
- decoded_ids = []
- for id_ in ids:
- if 0 <= id_ < self._num_reserved_ids:
- decoded_ids.append(RESERVED_TOKENS[int(id_)])
- else:
- decoded_ids.append(id_ - self._num_reserved_ids)
- return [str(d) for d in decoded_ids]
-
- @property
- def vocab_size(self):
- raise NotImplementedError()
-
-
-class TokenTextEncoder(TextEncoder):
- """Encoder based on a user-supplied vocabulary (file or list)."""
-
- def __init__(self,
- vocab_filename,
- reverse=False,
- vocab_list=None,
- replace_oov=None,
- num_reserved_ids=NUM_RESERVED_TOKENS):
- """Initialize from a file or list, one token per line.
-
- Handling of reserved tokens works as follows:
- - When initializing from a list, we add reserved tokens to the vocab.
- - When initializing from a file, we do not add reserved tokens to the vocab.
- - When saving vocab files, we save reserved tokens to the file.
-
- Args:
- vocab_filename: If not None, the full filename to read vocab from. If this
- is not None, then vocab_list should be None.
- reverse: Boolean indicating if tokens should be reversed during encoding
- and decoding.
- vocab_list: If not None, a list of elements of the vocabulary. If this is
- not None, then vocab_filename should be None.
- replace_oov: If not None, every out-of-vocabulary token seen when
- encoding will be replaced by this string (which must be in vocab).
- num_reserved_ids: Number of IDs to save for reserved tokens like <EOS>.
- """
- super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids)
- self._reverse = reverse
- self._replace_oov = replace_oov
- if vocab_filename:
- self._init_vocab_from_file(vocab_filename)
- else:
- assert vocab_list is not None
- self._init_vocab_from_list(vocab_list)
- self.pad_index = self.token_to_id[PAD]
- self.eos_index = self.token_to_id[EOS]
- self.unk_index = self.token_to_id[UNK]
- self.seg_index = self.token_to_id[SEG] if SEG in self.token_to_id else self.eos_index
-
- def encode(self, s):
- """Converts a space-separated string of tokens to a list of ids."""
- sentence = s
- tokens = sentence.strip().split()
- if self._replace_oov is not None:
- tokens = [t if t in self.token_to_id else self._replace_oov
- for t in tokens]
- ret = [self.token_to_id[tok] for tok in tokens]
- return ret[::-1] if self._reverse else ret
-
- def decode(self, ids, strip_eos=False, strip_padding=False):
- if strip_padding and self.pad() in list(ids):
- pad_pos = list(ids).index(self.pad())
- ids = ids[:pad_pos]
- if strip_eos and self.eos() in list(ids):
- eos_pos = list(ids).index(self.eos())
- ids = ids[:eos_pos]
- return " ".join(self.decode_list(ids))
-
- def decode_list(self, ids):
- seq = reversed(ids) if self._reverse else ids
- return [self._safe_id_to_token(i) for i in seq]
-
- @property
- def vocab_size(self):
- return len(self.id_to_token)
-
- def __len__(self):
- return self.vocab_size
-
- def _safe_id_to_token(self, idx):
- return self.id_to_token.get(idx, "ID_%d" % idx)
-
- def _init_vocab_from_file(self, filename):
- """Load vocab from a file.
-
- Args:
- filename: The file to load vocabulary from.
- """
- with open(filename) as f:
- tokens = [token.strip() for token in f.readlines()]
-
- def token_gen():
- for token in tokens:
- yield token
-
- self._init_vocab(token_gen(), add_reserved_tokens=False)
-
- def _init_vocab_from_list(self, vocab_list):
- """Initialize tokens from a list of tokens.
-
- It is ok if reserved tokens appear in the vocab list. They will be
- removed. The set of tokens in vocab_list should be unique.
-
- Args:
- vocab_list: A list of tokens.
- """
-
- def token_gen():
- for token in vocab_list:
- if token not in RESERVED_TOKENS:
- yield token
-
- self._init_vocab(token_gen())
-
- def _init_vocab(self, token_generator, add_reserved_tokens=True):
- """Initialize vocabulary with tokens from token_generator."""
-
- self.id_to_token = {}
- non_reserved_start_index = 0
-
- if add_reserved_tokens:
- self.id_to_token.update(enumerate(RESERVED_TOKENS))
- non_reserved_start_index = len(RESERVED_TOKENS)
-
- self.id_to_token.update(
- enumerate(token_generator, start=non_reserved_start_index))
-
- # _token_to_id is the reverse of _id_to_token
- self.token_to_id = dict((v, k) for k, v in six.iteritems(self.id_to_token))
-
- def pad(self):
- return self.pad_index
-
- def eos(self):
- return self.eos_index
-
- def unk(self):
- return self.unk_index
-
- def seg(self):
- return self.seg_index
-
- def store_to_file(self, filename):
- """Write vocab file to disk.
-
- Vocab files have one token per line. The file ends in a newline. Reserved
- tokens are written to the vocab file as well.
-
- Args:
- filename: Full path of the file to store the vocab to.
- """
- with open(filename, "w") as f:
- for i in range(len(self.id_to_token)):
- f.write(self.id_to_token[i] + "\n")
-
- def sil_phonemes(self):
- return [p for p in self.id_to_token.values() if is_sil_phoneme(p)]
-
-
-def build_token_encoder(token_list_file):
- token_list = json.load(open(token_list_file))
- return TokenTextEncoder(None, vocab_list=token_list, replace_oov='<UNK>')
-
-
-def is_sil_phoneme(p):
- return p == '' or not p[0].isalpha()
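`TokenTextEncoder` above adds the reserved tokens when built from a list and maps out-of-vocabulary tokens to `replace_oov`. A minimal round-trip sketch, assuming the class and the reserved-token constants above are importable:

```python
# Toy vocabulary; '|' is the SEG token defined at the top of the module.
phones = ['AH', 'B', 'K', 'T', '|']
encoder = TokenTextEncoder(None, vocab_list=phones, replace_oov='<UNK>')

ids = encoder.encode('B AH T Z')   # 'Z' is out of vocabulary -> mapped to '<UNK>'
print(ids)
print(encoder.decode(ids))         # 'B AH T <UNK>'
print(encoder.vocab_size, encoder.pad(), encoder.eos(), encoder.unk(), encoder.seg())
```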
diff --git a/spaces/NCTCMumbai/NCTC/models/research/brain_coder/single_task/ga_train.py b/spaces/NCTCMumbai/NCTC/models/research/brain_coder/single_task/ga_train.py
deleted file mode 100644
index 630eca427e478dbadad58bd94b56e89a5a747526..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/research/brain_coder/single_task/ga_train.py
+++ /dev/null
@@ -1,324 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-"""Genetic algorithm for BF tasks.
-
-Also contains the uniform random search algorithm.
-
-Inspired by https://github.com/primaryobjects/AI-Programmer.
-GA function code borrowed from https://github.com/DEAP/deap.
-"""
-
-import cPickle
-import os
-import sys
-from time import sleep
-
-from absl import flags
-from absl import logging
-import numpy as np
-from six.moves import xrange
-import tensorflow as tf
-
-from common import utils # brain coder
-from single_task import data # brain coder
-from single_task import defaults # brain coder
-from single_task import ga_lib # brain coder
-from single_task import results_lib # brain coder
-
-FLAGS = flags.FLAGS
-
-
-def define_tuner_hparam_space(hparam_space_type):
- """Define tunable hparams for grid search."""
- if hparam_space_type != 'ga':
- raise ValueError('Hparam space is not valid: "%s"' % hparam_space_type)
- return {
- 'population_size': [10, 25, 50, 100, 500],
- 'crossover_rate': [0.2, 0.5, 0.7, 0.9, 0.95],
- 'mutation_rate': [0.01, 0.03, 0.05, 0.1, 0.15]}
-
-
-def write_hparams_to_config(config, hparams, hparam_space_type):
- """Write hparams given by the tuner into the Config object."""
- if hparam_space_type != 'ga':
- raise ValueError('Hparam space is not valid: "%s"' % hparam_space_type)
- config.batch_size = hparams.population_size
- config.agent.crossover_rate = hparams.crossover_rate
- config.agent.mutation_rate = hparams.mutation_rate
-
-
-class CheckpointWriter(object):
- """Manages loading and saving GA populations to disk.
-
- This object is used by the genetic algorithm to save progress periodically
- so that a recent population can be loaded from disk in the event of a restart.
- """
-
- def __init__(self, checkpoint_dir, population_size):
- self.checkpoint_file = os.path.join(checkpoint_dir, 'checkpoint.pickle')
- self.population_size = population_size
-
- def write(self, gen, population, halloffame):
- """Write GA state to disk.
-
- Overwrites previous saved state.
-
- Args:
- gen: Generation number.
- population: List of Individual objects.
- halloffame: Hall-of-fame buffer. Typically a priority queue.
- """
- raw = cPickle.dumps((gen, population, halloffame))
- with tf.gfile.FastGFile(self.checkpoint_file, 'w') as f:
- f.write(raw)
-
- def load(self):
- """Loads GA state from disk.
-
- Loads whatever is on disk, which will be whatever the most recent call
- to `write` wrote.
-
- Returns:
- gen: Generation number.
- population: List of Individual objects.
- halloffame: Hall-of-fame buffer. Typically a priority queue.
- """
- with tf.gfile.FastGFile(self.checkpoint_file, 'r') as f:
- raw = f.read()
- objs = cPickle.loads(raw)
- # Validate data.
- assert isinstance(objs, tuple) and len(objs) == 3, (
- 'Expecting a 3-tuple, but got %s instead.' % (objs,))
- gen, population, halloffame = objs
- assert isinstance(gen, int), (
- 'Expecting `gen` to be an integer, got %s' % (gen,))
- assert (
- isinstance(population, list)
- and len(population) == self.population_size
- ), (
- 'Expecting `population` to be a list with size %d, got %s'
- % (self.population_size, population))
- assert halloffame is None or len(halloffame) == 2, (
- 'Expecting hall-of-fame object to have length two, got length %d'
- % len(halloffame))
- logging.info('Loaded pop from checkpoint file: "%s".',
- self.checkpoint_file)
- return gen, population, halloffame
-
- def has_checkpoint(self):
- """Checks if a checkpoint exists on disk, and if so returns True."""
- return tf.gfile.Exists(self.checkpoint_file)
-
-
-def run_training(config=None, tuner=None, logdir=None, trial_name=None, # pylint: disable=unused-argument
- is_chief=True):
- """Do all training runs.
-
- This is the top level training function for policy gradient based models.
- Run this from the main function.
-
- Args:
- config: config_lib.Config instance containing global config (agent and
- environment hparams). If None, config will be parsed from FLAGS.config.
- tuner: (unused) A tuner instance. Leave as None if not tuning.
- logdir: Parent directory where all data from all runs will be written. If
- None, FLAGS.logdir will be used.
- trial_name: (unused) If tuning, set this to a unique string that identifies
- this trial. If `tuner` is not None, this also must be set.
- is_chief: True if this worker is the chief.
-
- Returns:
- List of results dicts which were written to disk. Each training run gets a
- results dict. Results dict contains metrics, i.e. (name, value) pairs which
- give information about the training run.
-
- Raises:
- ValueError: If FLAGS.num_workers does not divide FLAGS.num_repetitions.
- ValueError: If results dicts read from disk contain invalid data.
- """
- if not config:
- # If custom config is not given, get it from flags.
- config = defaults.default_config_with_updates(FLAGS.config)
- if not logdir:
- logdir = FLAGS.logdir
-
- if FLAGS.num_repetitions % FLAGS.num_workers != 0:
- raise ValueError('Number of workers must divide number of repetitions')
- num_local_reps = FLAGS.num_repetitions // FLAGS.num_workers
- logging.info('Running %d reps globally.', FLAGS.num_repetitions)
- logging.info('This worker will run %d local reps.', num_local_reps)
- if FLAGS.max_npe:
- max_generations = FLAGS.max_npe // config.batch_size
- logging.info('Max samples per rep: %d', FLAGS.max_npe)
- logging.info('Max generations per rep: %d', max_generations)
- else:
- max_generations = sys.maxint
- logging.info('Running unlimited generations.')
-
- assert FLAGS.num_workers > 0
- logging.info('Starting experiment. Directory: "%s"', logdir)
- results = results_lib.Results(logdir, FLAGS.task_id)
- local_results_list = results.read_this_shard()
- if local_results_list:
- if local_results_list[0]['max_npe'] != FLAGS.max_npe:
- raise ValueError(
- 'Cannot resume training. Max-NPE changed. Was %s, now %s'
- % (local_results_list[0]['max_npe'], FLAGS.max_npe))
- if local_results_list[0]['max_global_repetitions'] != FLAGS.num_repetitions:
- raise ValueError(
- 'Cannot resume training. Number of repetitions changed. Was %s, now %s'
- % (local_results_list[0]['max_global_repetitions'],
- FLAGS.num_repetitions))
- start_rep = len(local_results_list)
-
- for rep in xrange(start_rep, num_local_reps):
- global_rep = num_local_reps * FLAGS.task_id + rep
- logging.info(
- 'Starting repetition: Rep = %d. (global rep = %d)',
- rep, global_rep)
-
- # Save data for each rep, like checkpoints, goes into separate folders.
- run_dir = os.path.join(logdir, 'run_%d' % global_rep)
-
- if not tf.gfile.IsDirectory(run_dir):
- tf.gfile.MakeDirs(run_dir)
- checkpoint_writer = CheckpointWriter(run_dir,
- population_size=config.batch_size)
-
- data_manager = data.DataManager(config, run_number=global_rep)
- task_eval_fn = ga_lib.make_task_eval_fn(data_manager.rl_task)
-
- if config.agent.algorithm == 'rand':
- logging.info('Running random search.')
- assert FLAGS.max_npe
- result = run_random_search(
- FLAGS.max_npe, run_dir, task_eval_fn, config.timestep_limit)
- else:
- assert config.agent.algorithm == 'ga'
- logging.info('Running genetic algorithm.')
- pop = ga_lib.make_population(
- ga_lib.random_individual(config.timestep_limit),
- n=config.batch_size)
- hof = utils.MaxUniquePriorityQueue(2) # Hall of fame.
- result = ga_lib.ga_loop(
- pop,
- cxpb=config.agent.crossover_rate, mutpb=config.agent.mutation_rate,
- task_eval_fn=task_eval_fn,
- ngen=max_generations, halloffame=hof,
- checkpoint_writer=checkpoint_writer)
-
- logging.info('Finished rep. Num gens: %d', result.generations)
-
- results_dict = {
- 'max_npe': FLAGS.max_npe,
- 'batch_size': config.batch_size,
- 'max_batches': FLAGS.max_npe // config.batch_size,
- 'npe': result.num_programs,
- 'max_global_repetitions': FLAGS.num_repetitions,
- 'max_local_repetitions': num_local_reps,
- 'code_solution': result.best_code if result.solution_found else '',
- 'best_reward': result.reward,
- 'num_batches': result.generations,
- 'found_solution': result.solution_found,
- 'task': data_manager.task_name,
- 'global_rep': global_rep}
- logging.info('results_dict: %s', results_dict)
- results.append(results_dict)
-
- if is_chief:
- logging.info(
- 'Worker is chief. Waiting for all workers to finish so that results '
- 'can be reported to the tuner.')
-
- global_results_list, shard_stats = results.read_all(
- num_shards=FLAGS.num_workers)
- while not all(s.finished for s in shard_stats):
- logging.info(
- 'Still waiting on these workers: %s',
- ', '.join(
- ['%d (%d reps left)'
- % (i, s.max_local_reps - s.num_local_reps_completed)
- for i, s in enumerate(shard_stats)
- if not s.finished]))
- sleep(60)
- global_results_list, shard_stats = results.read_all(
- num_shards=FLAGS.num_workers)
-
- logging.info(
- '%d results obtained. Chief worker is exiting the experiment.',
- len(global_results_list))
-
- return global_results_list
-
-
-def run_random_search(max_num_programs, checkpoint_dir, task_eval_fn,
- timestep_limit):
- """Run uniform random search routine.
-
- Randomly samples programs from a uniform distribution until either a valid
- program is found, or the maximum NPE is reached. Results are written to disk
- and returned.
-
- Args:
- max_num_programs: Maximum NPE (number of programs executed). If no solution
- is found after this many programs are tried, the run is stopped and
- considered a failure.
- checkpoint_dir: Where to save state during the run.
- task_eval_fn: Function that maps code string to result containing total
- reward and info about success.
- timestep_limit: Maximum length of code strings.
-
- Returns:
- ga_lib.GaResult namedtuple instance. This contains the best code and highest
- reward found.
- """
- checkpoint_file = os.path.join(checkpoint_dir, 'random_search.txt')
- num_programs_seen = 0
- found_solution = False
- best_code = ''
- best_reward = 0.0
- if tf.gfile.Exists(checkpoint_file):
- try:
- with tf.gfile.FastGFile(checkpoint_file, 'r') as f:
- lines = list(f)
- num_programs_seen = int(lines[0])
- found_solution = bool(int(lines[1]))
- if found_solution:
- best_code = lines[2]
- best_reward = float(lines[3])
- except: # pylint: disable=bare-except
- pass
-
- while not found_solution and num_programs_seen < max_num_programs:
- if num_programs_seen % 1000 == 0:
- logging.info('num_programs_seen = %d', num_programs_seen)
- with tf.gfile.FastGFile(checkpoint_file, 'w') as f:
- f.write(str(num_programs_seen) + '\n')
- f.write(str(int(found_solution)) + '\n')
-
- code = np.random.choice(ga_lib.GENES, timestep_limit).tolist()
- res = task_eval_fn(code)
- found_solution = res.correct
- num_programs_seen += 1
-
- if found_solution:
- best_code = ''.join(code)
- best_reward = res.reward
-
- logging.info('num_programs_seen = %d', num_programs_seen)
- logging.info('found solution: %s', found_solution)
- with tf.gfile.FastGFile(checkpoint_file, 'w') as f:
- f.write(str(num_programs_seen) + '\n')
- f.write(str(int(found_solution)) + '\n')
- if found_solution:
- f.write(best_code + '\n')
- f.write(str(best_reward) + '\n')
-
- return ga_lib.GaResult(
- population=[], best_code=best_code, reward=best_reward,
- solution_found=found_solution, generations=num_programs_seen,
- num_programs=num_programs_seen, max_generations=max_num_programs,
- max_num_programs=max_num_programs)
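`run_random_search` above keeps sampling fixed-length programs until one solves the task or the NPE budget is exhausted. A stripped-down sketch of that loop; `GENES` and `task_eval_fn` here are placeholders for the real ones in `ga_lib`, which return a result object with `.correct` and `.reward`:

```python
import random

# Hypothetical stand-ins for ga_lib.GENES and the task evaluation function.
GENES = list('+-<>[].,')                        # BF opcodes, purely illustrative
def task_eval_fn(code):                         # placeholder: "solved" if the program contains "[-]"
    solved = '[-]' in ''.join(code)
    return solved, (1.0 if solved else 0.0)

max_num_programs, timestep_limit = 10000, 20
best_code, best_reward, found_solution = '', 0.0, False
num_programs_seen = 0

while not found_solution and num_programs_seen < max_num_programs:
    code = [random.choice(GENES) for _ in range(timestep_limit)]
    found_solution, reward = task_eval_fn(code)
    num_programs_seen += 1
    if found_solution:
        best_code, best_reward = ''.join(code), reward

print(num_programs_seen, found_solution, best_reward, best_code)
```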
diff --git a/spaces/Nee001/bing0/src/components/chat-message.tsx b/spaces/Nee001/bing0/src/components/chat-message.tsx
deleted file mode 100644
index bf272d8d7005cfd06c53bd213e09ea217e803549..0000000000000000000000000000000000000000
--- a/spaces/Nee001/bing0/src/components/chat-message.tsx
+++ /dev/null
@@ -1,93 +0,0 @@
-import remarkGfm from 'remark-gfm'
-import remarkMath from 'remark-math'
-import supersub from 'remark-supersub'
-import remarkBreaks from 'remark-breaks'
-import { cn } from '@/lib/utils'
-import { CodeBlock } from '@/components/ui/codeblock'
-import { MemoizedReactMarkdown } from '@/components/markdown'
-import { LearnMore } from './learn-more'
-import { ChatMessageModel } from '@/lib/bots/bing/types'
-import { useEffect } from 'react'
-import { TurnCounter } from './turn-counter'
-
-export interface ChatMessageProps {
- message: ChatMessageModel
-}
-
-export function ChatMessage({ message, ...props }: ChatMessageProps) {
- useEffect(() => {
- if (document.body.scrollHeight - window.innerHeight - window.scrollY - 200 < 0) {
- window.scrollBy(0, 200)
- }
- }, [message.text])
-
- return message.text ? (
-
', unsafe_allow_html=True)
-# st.write("Summarization Results for Model 3")
- st.write("Model name: sshleifer/distilbart-cnn-12-6")
- st.write("Rouge Scores: ")
- st.write("Rouge 1 Score: ",analysis["sshleifer/distilbart-cnn-12-6"]["Score"]["rouge1"])
- st.write("Rouge 2 Score: ",analysis["sshleifer/distilbart-cnn-12-6"]["Score"]["rouge2"])
- st.write("Rouge L Score: ",analysis["sshleifer/distilbart-cnn-12-6"]["Score"]["rougeL"])
- st.write("Rouge LSum Score: ",analysis["sshleifer/distilbart-cnn-12-6"]["Score"]["rougeLsum"])
-
- st.write("Result: ", analysis["sshleifer/distilbart-cnn-12-6"]["Result"])
-
-
-
-
-#OBJECT DETECTION
-
-def yolo_tiny(name):
- image = read_image(name)
-
- model = YolosForObjectDetection.from_pretrained('hustvl/yolos-tiny')
- image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny")
-
- inputs = image_processor(images=image, return_tensors="pt")
- outputs = model(**inputs)
-
- # model predicts bounding boxes and corresponding COCO classes
- logits = outputs.logits
- bboxes = outputs.pred_boxes
-
-
- # print results
- target_sizes = torch.tensor([image.shape[::-1][:2]])
-
- results = image_processor.post_process_object_detection(outputs, threshold=0.7, target_sizes=target_sizes)[0]
-
- label_ = []
- bboxes = []
-
- for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
- box = [round(i, 2) for i in box.tolist()]
- print(
- f"Detected {model.config.id2label[label.item()]} with confidence "
- f"{round(score.item(), 3)} at location {box}"
- )
-
- label_.append(model.config.id2label[label.item()])
- bboxes.append(np.asarray(box,dtype="int"))
- bboxes = torch.tensor(bboxes, dtype=torch.int)
-
- img=draw_bounding_boxes(image, bboxes,labels = label_, width=3)
- img = torchvision.transforms.ToPILImage()(img)
- return img
-# img.show()
-
-
-
-def resnet_101(name):
- image = read_image(name)
- processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-101")
- model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-101")
-
- inputs = processor(images=image, return_tensors="pt")
- outputs = model(**inputs)
-
- # convert outputs (bounding boxes and class logits) to COCO API
- # let's only keep detections with score > 0.9
- target_sizes = torch.tensor([image.shape[::-1][:2]])
- results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.7)[0]
- label_ = []
- bboxes = []
- for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
- box = [round(i, 2) for i in box.tolist()]
- print(
- f"Detected {model.config.id2label[label.item()]} with confidence "
- f"{round(score.item(), 3)} at location {box}")
- label_.append(model.config.id2label[label.item()])
- bboxes.append(np.asarray(box,dtype="int"))
-
- bboxes = torch.tensor(bboxes, dtype=torch.int)
-
- img=draw_bounding_boxes(image, bboxes,labels = label_, width=3)
- img = torchvision.transforms.ToPILImage()(img)
- return img
-
-
-
-
-
-def resnet_50(name):
- image = read_image(name)
- processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
- model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")
-
- inputs = processor(images=image, return_tensors="pt")
- outputs = model(**inputs)
-
- # convert outputs (bounding boxes and class logits) to COCO API
- # let's only keep detections with score > 0.9
- target_sizes = torch.tensor([image.shape[::-1][:2]])
- results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.7)[0]
- label_ = []
- bboxes = []
- for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
- box = [round(i, 2) for i in box.tolist()]
- print(
- f"Detected {model.config.id2label[label.item()]} with confidence "
- f"{round(score.item(), 3)} at location {box}"
- )
- label_.append(model.config.id2label[label.item()])
- bboxes.append(np.asarray(box,dtype="int"))
-
- bboxes = torch.tensor(bboxes, dtype=torch.int)
-
- img=draw_bounding_boxes(image, bboxes,labels = label_, width=3)
- img = torchvision.transforms.ToPILImage()(img)
- return img
-
-
-
-def object_detection():
-# st.write("Upload your image: ")
-# uploaded_files = ""
-# uploaded_files = st.file_uploader("Choose a image file", accept_multiple_files=True)
-
- option = st.selectbox(
- 'What image you want for analysis?',
- ("Choose an image for object detection analysis from the options below:",'Cat and Dog', '2 lazy cats chilling on a couch', 'An astronaut riding wild horse'))
-
- if option == 'Cat and Dog' or option == '2 lazy cats chilling on a couch' or option == 'An astronaut riding wild horse':
- st.write('You selected:', option)
-
- if option == 'Cat and Dog':
- name = "cat_dog.jpg"
- st.image("cat_dog.jpg")
-
- if option == '2 lazy cats chilling on a couch':
- name = "cat_remote.jpg"
- st.image("cat_remote.jpg")
-
- if option == 'An astronaut riding wild horse':
- name = "astronaut_rides_horse.png"
- st.image("astronaut_rides_horse.png")
-
- if st.button("Done"):
- # global file_data
-# st.write("filename:", uploaded_files)
-# for uploaded_file in uploaded_files:
- # print("here")
- # file_data = open(uploaded_file.name).read()
- st.write("filename:", name)
-# name = uploaded_file.name
- st.image([yolo_tiny(name),resnet_101(name),resnet_50(name)],caption=["hustvl/yolos-tiny","facebook/detr-resnet-101","facebook/detr-resnet-50"])
-
-
-def task_categorization_model_predictions():
- st.image("./panelup.png")
-
- # st.title("Text Analysis App")
-
- data = ""
-
- classifier = pipeline("zero-shot-classification",model="facebook/bart-large-mnli")
-
- global check
-
- st.markdown(f'{"Write down below the description of your AI application in few sentences:"}', unsafe_allow_html=True)
-
- prompt = st.text_input(" ")
-
- st.write("")
- st.write("")
-
- if prompt != "":
- # sbert_saved_model = torch.load("Sbert_saved_model", map_location=torch.device('cpu')).to("cpu")
- # model = sbert_saved_model.to("cpu")
- # tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-mpnet-base-v2")
- # pipe = TextClassificationPipeline(model= model, tokenizer=tokenizer, return_all_scores=True)
- # # outputs a list of dicts like [[{'label': 'NEGATIVE', 'score': 0.0001223755971295759}, {'label': 'POSITIVE', 'score': 0.9998776316642761}]]
-
- # # prompt = ["What is the the best ai for putting text report into data table?","How can I generate car sales agreement with ai model?","AI model to detect burglar on 48 hours of cctv video footage","I need Ai model help me with rewriting 50 financial statements emails into one summary report ?","I need a model for extracting person from an image"]
- # # responses = pipe(prompt)
-
-
- # models_list = pd.read_csv("models.csv")
- # # st.write(get_top_3(prompt))
-
- # top_cat, top_models = get_top_3(prompt)
- # # prompt = input("Enter your AI task idea:")
- # # top_cats,cat_to_models = get_models(prompt)
-
- # # top_models = cat_to_models[top_cats[0]]
-
- # top_cat = " " + top_cat[0].upper() + top_cat[1:]
-
-
-
- st.markdown(f'
`);t=query("#_tmp_width")[0].clientWidth;return query("#_tmp_width").remove(),t}execTemplate(e,i){return"string"==typeof e&&i&&"object"==typeof i?e.replace(/\${([^}]+)?}/g,function(e,t){return i[t]||t}):e}marker(e,s,l={onlyFirst:!1,wholeWord:!1}){Array.isArray(s)||(s=null!=s&&""!==s?[s]:[]);let r=l.wholeWord;query(e).each(t=>{for(var e=t,i=/\((.|\n|\r)*)\<\/span\>/gi;-1!==e.innerHTML.indexOf('{e=(e="string"!=typeof e?String(e):e).replace(/[-[\]{}()*+?.,\\^$|#\s]/g,"\\$&").replace(/&/g,"&").replace(//g,"<");e=new RegExp((r?"\\b":"")+e+(r?"\\b":"")+"(?!([^<]+)?>)","i"+(l.onlyFirst?"":"g"));t.innerHTML=t.innerHTML.replace(e,e=>''+e+"")})})}lang(e,t){if(!e||null==this.settings.phrases||"string"!=typeof e||"<=>=".includes(e))return this.execTemplate(e,t);let i=this.settings.phrases[e];return null==i?(i=e,this.settings.warnNoPhrase&&(this.settings.missing||(this.settings.missing={}),this.settings.missing[e]="---",this.settings.phrases[e]="---",console.log(`Missing translation for "%c${e}%c", see %c w2utils.settings.phrases %c with value "---"`,"color: orange","","color: #999",""))):"---"!==i||this.settings.warnNoPhrase||(i=e),"---"===i&&(i=`---`),this.execTemplate(i,t)}locale(l,i,r){return new Promise((s,t)=>{if(Array.isArray(l)){this.settings.phrases={};let i=[],t={};l.forEach((e,t)=>{5===e.length&&(e="locale/"+e.toLowerCase()+".json",l[t]=e),i.push(this.locale(e,!0,!1))}),void Promise.allSettled(i).then(e=>{e.forEach(e=>{e.value&&(t[e.value.file]=e.value.data)}),l.forEach(e=>{this.settings=this.extend({},this.settings,t[e])}),s()})}else(l=l||"en-us")instanceof Object?this.settings=this.extend({},this.settings,w2locale,l):(5===l.length&&(l="locale/"+l.toLowerCase()+".json"),fetch(l,{method:"GET"}).then(e=>e.json()).then(e=>{!0!==r&&(this.settings=i?this.extend({},this.settings,e):this.extend({},this.settings,w2locale,{phrases:{}},e)),s({file:l,data:e})}).catch(e=>{console.log("ERROR: Cannot load locale "+l),t(e)}))})}scrollBarSize(){return this.tmp.scrollBarSize||(query("body").append(`
-
-
1
-
- `),this.tmp.scrollBarSize=100-query("#_scrollbar_width > div")[0].clientWidth,query("#_scrollbar_width").remove()),this.tmp.scrollBarSize}checkName(e){return null==e?(console.log('ERROR: Property "name" is required but not supplied.'),!1):null!=w2ui[e]?(console.log(`ERROR: Object named "${e}" is already registered as w2ui.${e}.`),!1):!!this.isAlphaNumeric(e)||(console.log('ERROR: Property "name" has to be alpha-numeric (a-z, 0-9, dash and underscore).'),!1)}checkUniqueId(t,i,s,l){Array.isArray(i)||(i=[i]);let r=!0;return i.forEach(e=>{e.id===t&&(console.log(`ERROR: The item id="${t}" is not unique within the ${s} "${l}".`,i),r=!1)}),r}encodeParams(t,i=""){let s="";return Object.keys(t).forEach(e=>{""!=s&&(s+="&"),"object"==typeof t[e]?s+=this.encodeParams(t[e],i+e+(i?"]":"")+"["):s+=""+i+e+(i?"]":"")+"="+t[e]}),s}parseRoute(e){let n=[];e=e.replace(/\/\(/g,"(?:/").replace(/\+/g,"__plus__").replace(/(\/)?(\.)?:(\w+)(?:(\(.*?\)))?(\?)?/g,(e,t,i,s,l,r)=>(n.push({name:s,optional:!!r}),t=t||"",(r?"":t)+"(?:"+(r?t:"")+(i||"")+(l||(i?"([^/.]+?)":"([^/]+?)"))+")"+(r||""))).replace(/([\/.])/g,"\\$1").replace(/__plus__/g,"(.+)").replace(/\*/g,"(.*)");return{path:new RegExp("^"+e+"$","i"),keys:n}}getCursorPosition(e){if(null==e)return null;let t=0;var i,s=e.ownerDocument||e.document,l=s.defaultView||s.parentWindow;let r;return["INPUT","TEXTAREA"].includes(e.tagName)?t=e.selectionStart:l.getSelection?0<(r=l.getSelection()).rangeCount&&((i=(l=r.getRangeAt(0)).cloneRange()).selectNodeContents(e),i.setEnd(l.endContainer,l.endOffset),t=i.toString().length):(r=s.selection)&&"Control"!==r.type&&(l=r.createRange(),(i=s.body.createTextRange()).moveToElementText(e),i.setEndPoint("EndToEnd",l),t=i.text.length),t}setCursorPosition(s,l,t){if(null!=s){var r=document.createRange();let i,e=window.getSelection();if(["INPUT","TEXTAREA"].includes(s.tagName))s.setSelectionRange(l,t??l);else{for(let t=0;t").replace(/&/g,"&").replace(/"/g,'"').replace(/ /g," "):e).length){(i=(i=s.childNodes[t]).childNodes&&0i.length&&(l=i.length),r.setStart(i,l),t?r.setEnd(i,t):r.collapse(!0),e.removeAllRanges(),e.addRange(r))}}}parseColor(e){if("string"!=typeof e)return null;let t={};if(3===(e="#"===(e=e.trim().toUpperCase())[0]?e.substr(1):e).length)t={r:parseInt(e[0]+e[0],16),g:parseInt(e[1]+e[1],16),b:parseInt(e[2]+e[2],16),a:1};else if(6===e.length)t={r:parseInt(e.substr(0,2),16),g:parseInt(e.substr(2,2),16),b:parseInt(e.substr(4,2),16),a:1};else if(8===e.length)t={r:parseInt(e.substr(0,2),16),g:parseInt(e.substr(2,2),16),b:parseInt(e.substr(4,2),16),a:Math.round(parseInt(e.substr(6,2),16)/255*100)/100};else if(4{s[t]=this.clone(e,i)}):this.isPlainObject(e)?(s={},Object.assign(s,e),i.exclude&&i.exclude.forEach(e=>{delete s[e]}),Object.keys(s).forEach(e=>{s[e]=this.clone(s[e],i),void 0===s[e]&&delete s[e]})):e instanceof Function&&!i.functions||e instanceof Node&&!i.elements||e instanceof Event&&!i.events||(s=e),s}extend(i,s){if(Array.isArray(i)){if(!Array.isArray(s))throw new Error("Arrays can be extended with arrays only");i.splice(0,i.length),s.forEach(e=>{i.push(this.clone(e))})}else{if(i instanceof Node||i instanceof Event)throw new Error("HTML elmenents and events cannot be extended");if(i&&"object"==typeof i&&null!=s){if("object"!=typeof s)throw new Error("Object can be extended with other objects only.");Object.keys(s).forEach(e=>{var t;null!=i[e]&&"object"==typeof i[e]&&null!=s[e]&&"object"==typeof s[e]?(t=this.clone(s[e]),i[e]instanceof Node||i[e]instanceof 
Event?i[e]=t:(Array.isArray(i[e])&&this.isPlainObject(t)&&(i[e]={}),this.extend(i[e],t))):i[e]=this.clone(s[e])})}else if(null!=s)throw new Error("Object is not extendable, only {} or [] can be extended.")}if(2{"string"==typeof e||"number"==typeof e?i[t]={id:e,text:String(e)}:null!=e?(null!=e.caption&&null==e.text&&(e.text=e.caption),null!=e.text&&null==e.id&&(e.id=e.text),null==e.text&&null!=e.id&&(e.text=e.id)):i[t]={id:null,text:"null"}}),i):"function"==typeof i?(e=i.call(this,i,e),w2utils.normMenu.call(this,e)):"object"==typeof i?Object.keys(i).map(e=>({id:e,text:i[e]})):void 0}bindEvents(e,r){0!=e.length&&(e?.[0]instanceof Node&&(e=Array.isArray(e)?e:e.get()),query(e).each(s=>{let l=query(s).data();Object.keys(l).forEach(i=>{if(-1!=["click","dblclick","mouseenter","mouseleave","mouseover","mouseout","mousedown","mousemove","mouseup","contextmenu","focus","focusin","focusout","blur","input","change","keydown","keyup","keypress"].indexOf(String(i).toLowerCase())){let e=l[i],t=(e="string"==typeof e?e.split("|").map(e=>{"null"===(e="undefined"===(e="false"===(e="true"===e?!0:e)?!1:e)?void 0:e)&&(e=null);var t=["'",'"',"`"];return e="string"==typeof(e=parseFloat(e)==e?parseFloat(e):e)&&t.includes(e[0])&&t.includes(e[e.length-1])?e.substring(1,e.length-1):e}):e)[0];e=e.slice(1),query(s).off(i+".w2utils-bind").on(i+".w2utils-bind",function(i){switch(t){case"alert":alert(e[0]);break;case"stop":i.stopPropagation();break;case"prevent":i.preventDefault();break;case"stopPrevent":return i.stopPropagation(),i.preventDefault(),!1;default:if(null==r[t])throw new Error(`Cannot dispatch event as the method "${t}" does not exist.`);r[t].apply(r,e.map((e,t)=>{switch(String(e).toLowerCase()){case"event":return i;case"this":return this;default:return e}}))}})}})}))}}var w2utils=new Utils;class Dialog extends w2base{constructor(){super(),this.defaults={title:"",text:"",body:"",buttons:"",width:450,height:250,focus:null,actions:null,style:"",speed:.3,modal:!1,maximized:!1,keyboard:!0,showClose:!0,showMax:!1,transition:null,openMaximized:!1,moved:!1},this.name="popup",this.status="closed",this.onOpen=null,this.onClose=null,this.onMax=null,this.onMin=null,this.onToggle=null,this.onKeydown=null,this.onAction=null,this.onMove=null,this.tmp={},this.handleResize=e=>{this.options.moved||this.center(void 0,void 0,!0)}}open(s){let l=this;"closing"!=this.status&&!query("#w2ui-popup").hasClass("animating")||this.close(!0);var e=this.options;null!=(s=["string","number"].includes(typeof s)?w2utils.extend({title:"Notification",body:`