diff --git a/spaces/17TheWord/RealESRGAN/realesrgan/train.py b/spaces/17TheWord/RealESRGAN/realesrgan/train.py
deleted file mode 100644
index 8a9cec9ed80d9f362984779548dcec921a636a04..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/RealESRGAN/realesrgan/train.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# flake8: noqa
-import os.path as osp
-from basicsr.train import train_pipeline
-
-import realesrgan.archs
-import realesrgan.data
-import realesrgan.models
-
-if __name__ == '__main__':
-    root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
-    train_pipeline(root_path)
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Desarrollo del pensamiento tomo 2 resuelto pdf 27 La aventura de Shakespeare en el volumen II de Plaza Janes.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Desarrollo del pensamiento tomo 2 resuelto pdf 27 La aventura de Shakespeare en el volumen II de Plaza Janes.md
deleted file mode 100644
index 5eaae246e0581a6dafa9fd6ba8fee142f52b97f7..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Desarrollo del pensamiento tomo 2 resuelto pdf 27 La aventura de Shakespeare en el volumen II de Plaza Janes.md
+++ /dev/null
@@ -1,98 +0,0 @@
-

Introduction

-

Have you ever heard of desarrollo del pensamiento tomo 2 resuelto pdf 27? If you are interested in developing your thinking skills, this book is for you. It is a Spanish book that translates to "Development of Thinking Volume 2 Solved PDF 27". It is a comprehensive guide that covers various aspects of logic, reasoning, critical thinking, problem solving, creativity and innovation. It is written by a team of experts from different fields and disciplines, and it includes exercises, examples, diagrams, tables and charts to help you understand and apply the concepts.

-

Why is it important to study this book? Because in today's complex and dynamic world, you need to be able to think clearly, critically and creatively. You need to be able to analyze information, evaluate arguments, solve problems, make decisions, generate ideas and innovate solutions. These skills are essential for your personal and professional growth, as well as for your contribution to society. By studying this book, you will learn how to improve your thinking skills and become a better thinker.

-

desarrollo del pensamiento tomo 2 resuelto pdf 27


Download Ziphttps://byltly.com/2uKvwk



-

Main body

-

What are the main topics covered in the book?

-

The book is divided into three parts: logic and reasoning, critical thinking and problem solving, and creativity and innovation. Each part contains several chapters that explore different aspects of these topics. Here are some of the main topics covered in the book:

-

Logic and reasoning

-

This part introduces you to the basics of logic and reasoning, such as propositions, arguments, validity, soundness, fallacies, induction, deduction and abduction. You will learn how to identify and construct valid and sound arguments, how to avoid common logical errors and fallacies, how to use different types of reasoning for different purposes and contexts, and how to evaluate the strength of evidence and arguments.
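To make the idea of validity more concrete, here is a small illustrative sketch in Python (it is not taken from the book; modus ponens is just a standard textbook argument). It checks with a truth table that every assignment which makes the premises true also makes the conclusion true:

```python
from itertools import product

def implies(a, b):
    # Material implication: "a implies b" is false only when a is true and b is false.
    return (not a) or b

def is_valid(premises, conclusion):
    # An argument is valid when every truth assignment that makes all
    # premises true also makes the conclusion true.
    for p, q in product([True, False], repeat=2):
        if all(prem(p, q) for prem in premises) and not conclusion(p, q):
            return False  # counterexample found, so the argument is invalid
    return True

# Modus ponens: from "p implies q" and "p", conclude "q".
premises = [lambda p, q: implies(p, q), lambda p, q: p]
conclusion = lambda p, q: q
print(is_valid(premises, conclusion))  # prints True: the argument form is valid
```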

-

Critical thinking and problem solving

-

This part teaches you how to apply logic and reasoning to critical thinking and problem solving. You will learn how to define problems, identify assumptions, generate hypotheses, test solutions, monitor results and revise strategies. You will also learn how to use various tools and techniques for critical thinking and problem solving, such as brainstorming, mind mapping, SWOT analysis, decision matrix, fishbone diagram and Pareto principle.
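As a concrete illustration of one of these tools, here is a minimal decision matrix sketch in Python (the options, criteria, weights and scores are invented for the example and do not come from the book). Each option is scored against weighted criteria and the highest weighted total wins:

```python
# Weighted decision matrix: score each option against weighted criteria.
criteria = {"cost": 0.5, "quality": 0.3, "speed": 0.2}  # weights sum to 1
options = {
    "Option A": {"cost": 7, "quality": 6, "speed": 9},
    "Option B": {"cost": 5, "quality": 9, "speed": 6},
}

def total_score(scores):
    return sum(criteria[name] * value for name, value in scores.items())

for name, scores in options.items():
    print(name, round(total_score(scores), 2))

best = max(options, key=lambda name: total_score(options[name]))
print("Best choice:", best)
```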

-

desarrollo del pensamiento tomo 2 solucionario pdf gratis
-descargar desarrollo del pensamiento tomo 2 resuelto pdf
-libro desarrollo del pensamiento tomo 2 resuelto pdf completo
-desarrollo del pensamiento tomo 2 resuelto pdf 2021
-desarrollo del pensamiento tomo 2 resuelto pdf online
-desarrollo del pensamiento tomo 2 resuelto pdf descargar gratis
-desarrollo del pensamiento tomo 2 resuelto pdf capitulo 27
-desarrollo del pensamiento tomo 2 resuelto pdf ejercicios
-desarrollo del pensamiento tomo 2 resuelto pdf pagina 27
-desarrollo del pensamiento tomo 2 resuelto pdf gratis
-desarrollo del pensamiento tomo 2 resuelto pdf sway
-desarrollo del pensamiento tomo 2 resuelto pdf soundcloud
-desarrollo del pensamiento tomo 2 resuelto pdf libro
-desarrollo del pensamiento tomo 2 resuelto pdf download
-desarrollo del pensamiento tomo 2 resuelto pdf gratis online
-desarrollo del pensamiento tomo 2 resuelto pdf soluciones
-desarrollo del pensamiento tomo 2 resuelto pdf completo
-desarrollo del pensamiento tomo 2 resuelto pdf gratis descargar
-desarrollo del pensamiento tomo 2 resuelto pdf capitulo 27 solucionario
-desarrollo del pensamiento tomo 2 resuelto pdf ejercicios resueltos
-desarrollo del pensamiento tomo 2 resuelto pdf pagina 27 soluciones
-desarrollo del pensamiento tomo 2 resuelto pdf gratis sway
-desarrollo del pensamiento tomo 2 resuelto pdf gratis soundcloud
-desarrollo del pensamiento tomo 2 resuelto pdf libro gratis
-desarrollo del pensamiento tomo 2 resuelto pdf download gratis
-desarrollo del pensamiento tomo 2 solucionario pdf online
-descargar desarrollo del pensamiento tomo 2 solucionario pdf gratis
-libro desarrollo del pensamiento tomo 2 solucionario pdf completo
-desarrollo del pensamiento tomo 2 solucionario pdf 2021
-desarrollo del pensamiento tomo 2 solucionario pdf descargar gratis
-desarrollo del pensamiento tomo 2 solucionario pdf capitulo 27
-desarrollo del pensamiento tomo 2 solucionario pdf ejercicios
-desarrollo del pensamiento tomo 2 solucionario pdf pagina 27
-desarrollo del pensamiento tomo 2 solucionario pdf sway
-desarrollo del pensamiento tomo 2 solucionario pdf soundcloud
-desarrollo del pensamiento tomo 2 solucionario pdf libro
-desarrollo del pensamiento tomo 2 solucionario pdf download
-descargar desarrollo del pensamiento tomo 2 resuelto capitulo 27 pdf gratis
-libro desarrollo del pensamiento tomo 2 resuelto capitulo 27 pdf completo
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 pdf online
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 pdf descargar gratis
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 ejercicios
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 pagina
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 sway
-desarrollo del pensamiento tomo 2 resuelto capitulo 27 soundcloud
-desarrollo del pensamiento tomo 2 resuelto capitulo

-

Creativity and innovation

-

This part shows you how to use logic and reasoning to enhance your creativity and innovation. You will learn how to develop your creative potential, overcome mental blocks, stimulate your imagination, generate original ideas and implement innovative solutions. You will also learn how to use various methods and models for creativity and innovation, such as lateral thinking, divergent thinking, convergent thinking, TRIZ method, SCAMPER technique and design thinking.

-

How can you access the book online?

-

If you want to read desarrollo del pensamiento tomo 2 resuelto pdf 27 online, you have several options. Here are some of them:

-

Download it from Sway

-

Sway is a Microsoft service that allows you to create and share interactive presentations online. You can find desarrollo del pensamiento tomo 2 resuelto pdf 27 on Sway by following this link: https://sway.office.com/skrWSVcG4BefKxCb. You can download the PDF file from there by clicking on the download icon at the top right corner of the screen.

-

Read it on Scribd

-

Scribd is a digital library that offers unlimited access to books, audiobooks, magazines and documents online. You can find desarrollo del pensamiento tomo 2 resuelto pdf 27 on Scribd by following this link: https://www.scribd.com/document/511741583/Desarrollo-Del-Pensamiento-Tomo-2-Resuelto-Pdf-27. You can read the book online or download it as a PDF file by clicking on the download icon at the top right corner of the screen.

-

Buy it from Amazon

-

Amazon is an online marketplace that sells books, electronics, clothing and other products. You can buy desarrollo del pensamiento tomo 2 resuelto pdf 27 on Amazon by following this link: https://www.amazon.com/Desarrollo-Del-Pensamiento-Tomo-Resuelto/dp/B08ZJWZQ8Q. You can order the paperback version or the Kindle version of the book by clicking on the add to cart or buy now buttons.

-

How can you use the book to improve your skills?

-

Reading desarrollo del pensamiento tomo 2 resuelto pdf 27 online is not enough if you want to improve your skills. You need to practice what you learn by doing the exercises and examples in the book. You also need to apply what you learn by using the concepts in real-life situations. Here are some tips on how to use the book effectively:

-

Follow the exercises and examples

-

The book contains many exercises and examples that help you test your understanding and reinforce your learning. You should follow them carefully and try to solve them on your own before checking the answers. You should also compare your answers with those provided in the book and analyze why they are correct or incorrect. This will help you identify your strengths and weaknesses and improve your skills.

-

Apply the concepts to real-life situations

-

The book also contains many case studies and scenarios that illustrate how the concepts can be applied in real-life situations. You should read them attentively and try to relate them to your own experiences or interests. You should also think of other situations where you can use the concepts in your personal or professional life. This will help you transfer your learning from theory to practice and enhance your skills.

-

Join a study group or a forum

-

The book can be more enjoyable and effective if you study it with others who share your interest or goal. You can join a study group or a forum where you can discuss the topics in the book with other learners or experts. You can ask questions, share insights, exchange feedback or challenge each other with new problems or ideas. This will help you expand your perspective and deepen your understanding.

-

Conclusion

-

Summary of the main points

-

In conclusion, desarrollo del pensamiento tomo 2 resuelto pdf 27 is a valuable resource for anyone who wants to develop their thinking skills. It covers various aspects of logic, reasoning, critical thinking, problem solving, creativity and innovation. It provides exercises, examples, diagrams, tables and charts to help you understand and apply the concepts. It also offers several options for accessing the book online, such as downloading it from Sway, reading it on Scribd or buying it from Amazon. Finally, it gives some tips on how to use the book effectively, such as following the exercises and examples, applying the concepts to real-life situations or joining a study group or a forum.

-

Recommendations for further reading

-

If you want to learn more about the topics covered in the book, you can check out these resources:

- -

FAQs

-

Here are some frequently asked questions about desarrollo del pensamiento tomo 2 resuelto pdf 27:

-
    -
  1. What is the purpose of the book?
     The purpose of the book is to help you develop your thinking skills in various aspects, such as logic, reasoning, critical thinking, problem solving, creativity and innovation.
  2. Who is the author of the book?
     The book is written by a team of experts from different fields and disciplines, such as mathematics, philosophy, psychology, engineering and education.
  3. How long is the book?
     The book is about 400 pages long. It contains 27 chapters divided into three parts: logic and reasoning, critical thinking and problem solving, and creativity and innovation.
  4. How can I get a copy of the book?
     You can get a copy of the book online by downloading it from Sway, reading it on Scribd or buying it from Amazon. You can also find it in some libraries or bookstores.
  5. How can I use the book effectively?
     You can use the book effectively by following the exercises and examples in the book, applying the concepts to real-life situations and joining a study group or a forum.
-

0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download V-ray Sketchup 2016 64 Bit Full Crack !EXCLUSIVE!.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download V-ray Sketchup 2016 64 Bit Full Crack !EXCLUSIVE!.md
deleted file mode 100644
index 66a0a5e98254d1df73466ae226c0055d2a13e78f..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download V-ray Sketchup 2016 64 Bit Full Crack !EXCLUSIVE!.md
+++ /dev/null
@@ -1,38 +0,0 @@
-

How to Download V-Ray SketchUp 2016 64 Bit Full Crack

-

V-Ray is a powerful rendering engine that can enhance the quality and realism of your 3D models and scenes. It is compatible with SketchUp, a popular 3D modeling and design program that can create stunning architectural and interior designs. If you want to download V-Ray SketchUp 2016 64 bit full crack for free, you are in the right place. In this article, we will show you how to download and install V-Ray SketchUp 2016 64 bit full crack on your PC.

-

download v-ray sketchup 2016 64 bit full crack


Download >> https://byltly.com/2uKzZy



-

What is V-Ray SketchUp 2016 64 Bit Full Crack?

-

V-Ray SketchUp 2016 64 bit full crack is a cracked version of V-Ray SketchUp 2016 64 bit, which is a plugin that adds rendering capabilities to SketchUp. With V-Ray SketchUp 2016 64 bit full crack, you can render photorealistic images and animations with advanced lighting, materials, and camera settings. You can also use V-Ray SketchUp 2016 64 bit full crack to create realistic effects such as depth of field, motion blur, fog, caustics, and more.

-

V-Ray SketchUp 2016 64 bit full crack has many features and benefits, such as:

- -

How to Download V-Ray SketchUp 2016 64 Bit Full Crack?

-

To download V-Ray SketchUp 2016 64 bit full crack, you need to follow these steps:

-
    -
  1. Click on this link to download V-Ray SketchUp 2016 64 bit full crack from Google Drive: Download V-Ray SketchUp 2016 64 bit full crack.
  2. Extract the downloaded file with WinRAR or any other file compression software.
  3. Run the installer file "SketchUpPro-en-x64.exe" and follow the instructions to install SketchUp Pro 2016 on your PC.
  4. After the installation is complete, unzip the file "SketchUp Pro 2016 x64-patch.zip". Inside it, you will find a patcher file named "su2015-64-patch.exe".
  5. Copy and paste the patcher file to the folder where you installed SketchUp (by default, it is C:\\Program Files\\SketchUp\\SketchUp 2016).
  6. Run the patcher file as administrator and click on the patch button. You will see a message saying "Can not find the file. Search the file?". Click on "Yes".
  7. A new window will open. Browse to the folder "LayOut" (by default, it is C:\\Program Files\\SketchUp\\SketchUp 2016\\LayOut) and select the file "LayOut.exe". Click on "Open".
  8. The patcher will patch the file and show a message saying "The file has been patched!". Click on "OK".
  9. Repeat steps 6 to 8 for the files "Style Builder.exe" and "SketchUp.exe" in their respective folders.
  10. You have now successfully installed V-Ray SketchUp 2016 64 bit full crack on your PC.
-

How to Use V-Ray SketchUp 2016 64 Bit Full Crack?

-

To use V-Ray SketchUp 2016 64 bit full crack, you need to follow these steps:

-

-
    -
  1. Launch Sketch

    ddb901b051
    -
    -
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Celemony.Melodyne.Editor.v2.1.1.15-R2R .rar !LINK!.md b/spaces/1gistliPinn/ChatGPT4/Examples/Celemony.Melodyne.Editor.v2.1.1.15-R2R .rar !LINK!.md
deleted file mode 100644
index 8eae6367ec34b109ece7ee4bc8d65959e587702c..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Celemony.Melodyne.Editor.v2.1.1.15-R2R .rar !LINK!.md
+++ /dev/null
@@ -1,12 +0,0 @@
    -

    This text contains a list of file names and links related to Celemony Melodyne Editor, a software for editing audio files. The file names have different extensions, such as .rar, .zip, .html, and indicate the version number (v2.1.1.15), the release group (R2R), and the presence of a crack (a program that bypasses the software's copy protection). The file size is 84.8 MB for most of the files. The links at the end of the text point to websites that offer downloads of other files, such as a summary of biology for high school students in PDF format, a physics textbook for class 9 in PDF format, and a Hindi comedy movie in 720p resolution.

    -

    Celemony.Melodyne.Editor.v2.1.1.15-R2R .rar


    Download File ››› https://imgfil.com/2uy0QP



    - -

Celemony Melodyne Editor is an audio editing application that allows users to manipulate audio files in various ways, such as changing the pitch, tempo, timing, and tone of individual notes or entire tracks. It can also correct intonation and timing errors, create harmonies and melodies, and transcribe audio into musical notation. Celemony Melodyne Editor is compatible with Windows and Mac operating systems, and can be used as a standalone application or as a plug-in for other audio editing software.

    - -

The files listed in the text are compressed archives that contain the installation files and the crack for Celemony Melodyne Editor. A crack is a program that modifies the software's code to bypass its copy protection and allow users to use it without a license or activation key. However, using a crack is illegal and risky, as it may contain malware or viruses that can harm the user's computer or data. Moreover, using cracked software may result in poor performance, errors, or compatibility issues with other software or hardware.

    - -

    The links at the end of the text are unrelated to Celemony Melodyne Editor and seem to be spam or phishing attempts. They direct the user to websites that offer downloads of other files that may be of interest to some users, such as educational materials or entertainment content. However, these websites may also contain malware or viruses that can harm the user's computer or data. Furthermore, downloading these files may infringe the intellectual property rights of the original authors or creators. Therefore, it is advisable to avoid clicking on these links and to delete the text.

    -

    d5da3c52bf
    -
    -
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Cubase 6 Full Version Free Download Torrent [REPACK].md b/spaces/1gistliPinn/ChatGPT4/Examples/Cubase 6 Full Version Free Download Torrent [REPACK].md
deleted file mode 100644
index 962254a20601b7e9dd3a934ff5f93abe138ac33b..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Cubase 6 Full Version Free Download Torrent [REPACK].md
+++ /dev/null
@@ -1,14 +0,0 @@

    cubase 6 full version free download torrent


    DOWNLOAD ····· https://imgfil.com/2uxZsc



    -
    -Current sounds can only be downloaded using the Steinberg Download Assistant. ... 1, MAC WINDOWS, Groove Agent ONE/SE/4 VST Toolkit, 800MB.... 3, MAC WINDOWS, Groove Agent SE/5 VST Toolkit, 2GB. ... -4, MAC WINDOWS, Groove Agent SE/5 VST Toolkit, 2 GB -Jul 12 2019 Download. -Groove Agent SE 5.0 VST, AAX, AU WIN.OSX x86 x64 Release Year/Date: 05.2019 Version: 5.0 Developer: Steinberg Website -Feb 7 -2014 · Groove Agent SE 5.0. -Description: Steinberg Groove Agent puts at your disposal a set of tools and ... VST, AAX, AU -Mar 9 2015 Download torrent for free. -distribution statistics. ... 8a78ff9644
    -
    -
    -

diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing Lite MOD APK OBB Everything You Need to Know Before You Download.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing Lite MOD APK OBB Everything You Need to Know Before You Download.md
deleted file mode 100644
index 1a58c33f2ea2691aeeb4ddabc2f3ead811761c26..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/CarX Drift Racing Lite MOD APK OBB Everything You Need to Know Before You Download.md
+++ /dev/null
@@ -1,90 +0,0 @@
-

    CarX Drift Racing Lite Mod APK OBB: A Guide for Drift Racing Fans

    -

    Do you love drifting and racing games? Do you want to experience the thrill of driving realistic cars on challenging tracks? If yes, then you should try CarX Drift Racing Lite, a popular game that lets you enjoy the best of both worlds. And if you want to make the game even more fun and exciting, you should download CarX Drift Racing Lite Mod APK OBB, a modified version that gives you unlimited money, coins, cars, tracks, and more. In this article, we will tell you everything you need to know about CarX Drift Racing Lite and its mod apk obb version.

    -

    What is CarX Drift Racing Lite?

    -

    CarX Drift Racing Lite is a racing game that focuses on drifting, a driving technique where the driver intentionally oversteers the car to make it slide sideways. The game is developed by CarX Technologies, a company that specializes in creating realistic car physics and graphics for games. CarX Drift Racing Lite is a lite version of CarX Drift Racing, which means it has fewer cars, tracks, and features than the original game. However, it still offers a lot of fun and entertainment for drift racing fans.

    -

    carx drift racing lite mod apk obb


    Download ►►►►► https://urlin.us/2uT1lA



    -

    Features of CarX Drift Racing Lite

    -

    Realistic physics and graphics

    -

One of the main attractions of CarX Drift Racing Lite is its realistic physics and graphics. The game uses a sophisticated car physics engine that simulates the behavior of real cars on different surfaces and conditions. The game also has stunning graphics that create an immersive environment for the players. You can see the smoke, dust, sparks, and tire marks as you drift your car on the track. You can also feel the vibration and sound effects as you accelerate, brake, and steer your car.

    -

    Customizable cars and tracks

    -

    Another feature of CarX Drift Racing Lite is its customizable cars and tracks. The game allows you to choose from a variety of cars, each with its own characteristics and performance. You can also customize your car's appearance, color, wheels, engine, suspension, and more. You can also choose from different tracks, each with its own layout, difficulty, and scenery. You can also adjust the weather, time of day, and camera angle to suit your preference.

    -

    Online and offline modes

    -

    A third feature of CarX Drift Racing Lite is its online and offline modes. The game lets you play either online or offline, depending on your internet connection and mood. If you play online, you can compete with other players from around the world in various modes such as time attack, ghost mode, or multiplayer mode. You can also chat with other players and share your replays and screenshots. If you play offline, you can practice your skills in single-player mode or challenge yourself in career mode.

    -

    Why download CarX Drift Racing Lite Mod APK OBB?

    -

If you are already enjoying CarX Drift Racing Lite, you might wonder why you should download CarX Drift Racing Lite Mod APK OBB. Well, the answer is simple: because it makes the game even better. CarX Drift Racing Lite Mod APK OBB is a modified version of the game that gives you access to unlimited money, coins, cars, tracks, and more.

    No ads and no root required

    -

    With CarX Drift Racing Lite Mod APK OBB, you don't have to deal with annoying ads and pop-ups that interrupt your gameplay. You can enjoy the game without any distractions or interruptions. You also don't need to root your device to install the mod apk obb files. You can simply follow the instructions below and enjoy the game safely and smoothly.

    -

    How to download and install CarX Drift Racing Lite Mod APK OBB?

    -

    Step 1: Download the mod apk and obb files from a trusted source

    -

    The first step is to download the mod apk and obb files from a trusted source. You can use the link provided at the end of this article to download the files. Make sure you have enough storage space on your device before downloading the files.

    -

    Step 2: Enable unknown sources on your device settings

    -

    The second step is to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on. You may also need to disable any antivirus or security apps that may interfere with the installation process.

    -

    carx drift racing lite mod apk obb download
    -carx drift racing lite mod apk obb unlimited money
    -carx drift racing lite mod apk obb latest version
    -carx drift racing lite mod apk obb android 1
    -carx drift racing lite mod apk obb revdl
    -carx drift racing lite mod apk obb rexdl
    -carx drift racing lite mod apk obb offline
    -carx drift racing lite mod apk obb hack
    -carx drift racing lite mod apk obb free
    -carx drift racing lite mod apk obb data
    -carx drift racing lite mod apk obb file
    -carx drift racing lite mod apk obb full
    -carx drift racing lite mod apk obb mega
    -carx drift racing lite mod apk obb mediafire
    -carx drift racing lite mod apk obb google drive
    -carx drift racing lite mod apk obb 2023
    -carx drift racing lite mod apk obb update
    -carx drift racing lite mod apk obb new
    -carx drift racing lite mod apk obb best
    -carx drift racing lite mod apk obb premium
    -carx drift racing lite mod apk obb pro
    -carx drift racing lite mod apk obb vip
    -carx drift racing lite mod apk obb unlocked
    -carx drift racing lite mod apk obb all cars
    -carx drift racing lite mod apk obb no ads
    -carx drift racing lite mod apk obb no root
    -carx drift racing lite mod apk obb no verification
    -carx drift racing lite mod apk obb no survey
    -carx drift racing lite mod apk obb easy install
    -carx drift racing lite mod apk obb direct link
    -carx drift racing lite mod apk obb high quality
    -carx drift racing lite mod apk obb realistic graphics
    -carx drift racing lite mod apk obb smooth gameplay
    -carx drift racing lite mod apk obb awesome features
    -carx drift racing lite mod apk obb fun modes
    -carx drift racing lite mod apk obb online multiplayer
    -carx drift racing lite mod apk obb custom cars
    -carx drift racing lite mod apk obb tuning options
    -carx drift racing lite mod apk obb drifting physics
    -carx drift racing lite mod apk obb sound effects
    -carx drift racing lite mod apk obb music tracks
    -carx drift racing lite mod apk obb leaderboards
    -carx drift racing lite mod apk obb achievements
    -carx drift racing lite mod apk obb rewards
    -carx drift racing lite mod apk obb cheats
    -carx drift racing lite mod apk obb tips tricks
    -carx drift racing lite mod apk obb guide tutorial
    -carx drift racing lite mod apk obb review rating
    -carx drift racing lite mod apk obb gameplay video

    -

    Step 3: Install the mod apk file and extract the obb file to the Android/obb folder

    -

    The third step is to install the mod apk file and extract the obb file to the Android/obb folder. To do this, locate the downloaded files on your device, then tap on the mod apk file and follow the instructions to install it. Then, use a file manager app to extract the obb file to the Android/obb folder. If you don't have a file manager app, you can download one from the Google Play Store. Make sure you create a folder named com.CarXTech.CarXDriftRacingLite inside the Android/obb folder and place the extracted obb file there.

    -

    Conclusion

    -

    CarX Drift Racing Lite is a great game for drift racing fans who want to experience realistic physics and graphics, customizable cars and tracks, and online and offline modes. However, if you want to make the game even more enjoyable and exciting, you should download CarX Drift Racing Lite Mod APK OBB, which gives you unlimited money, coins, cars, tracks, and more. You can download CarX Drift Racing Lite Mod APK OBB from the link below and follow the steps above to install it on your device. Have fun drifting and racing!

    -

    FAQs

    -

    Here are some of the frequently asked questions about CarX Drift Racing Lite Mod APK OBB:

    -

    197e85843d
    -
    -
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download ETS2 Mods for Euro Truck Simulator 2 and Enhance Your Gaming Experience.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download ETS2 Mods for Euro Truck Simulator 2 and Enhance Your Gaming Experience.md
deleted file mode 100644
index 049d638a7d7f013ef0ef513349501dfbd29a3b20..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download ETS2 Mods for Euro Truck Simulator 2 and Enhance Your Gaming Experience.md
+++ /dev/null
@@ -1,110 +0,0 @@
-
    -

    Euro Truck Simulator 2 For Mobile - Everything You Need to Know

    -

    Do you love driving trucks and exploring new places? Do you want to experience the thrill of being a truck driver from the comfort of your home? If you answered yes to any of these questions, then you should definitely check out Euro Truck Simulator 2, one of the most popular and realistic truck driving simulator games ever made. And the best part is, you can now play it on your mobile device thanks to ets2.mobi, a website that offers ETS2 for Android and iOS. In this article, we will tell you everything you need to know about Euro Truck Simulator 2 for mobile, including what it is, how to download and install it, how to play it, and why you should try it today.

    -

    ets2 mobi


    DOWNLOADhttps://urlin.us/2uSUVd



    -

    What is Euro Truck Simulator 2?

    -

    Euro Truck Simulator 2, or ETS2 for short, is a game that simulates the life of a truck driver in Europe. It was developed and published by SCS Software, a Czech company that specializes in creating simulation games. ETS2 was released in 2012 for Windows, Linux, and Mac OS, and has since received many updates and expansions that added new features, content, and improvements. ETS2 has three main aspects that make it so appealing and realistic: a truck driving simulator, a huge map of Europe, and a variety of trucks and customization options.

    -

    A realistic truck driving simulator game

    -

    ETS2 is not just a game where you drive a truck from point A to point B. It is a game where you have to follow the rules of the road, deal with traffic, weather, fuel consumption, fatigue, cargo delivery, fines, repairs, and more. You have to plan your routes carefully, choose the best contracts, manage your finances, hire drivers, buy garages, and grow your own trucking company. You also have to take care of your truck, which can get damaged or break down if you drive recklessly or neglect maintenance. You can also customize your truck with different parts, accessories, paint jobs, decals, and more.

    -

    A huge map of Europe to explore

    -

    ETS2 features a massive map of Europe that covers over 70 cities in 13 countries. You can drive across different landscapes, such as mountains, forests, fields, deserts, coasts, and urban areas. You can also visit famous landmarks, such as the Eiffel Tower in Paris, the Brandenburg Gate in Berlin, the Colosseum in Rome, and more. The map is constantly updated with new regions and roads that add more diversity and realism to the game. You can also download mods that add even more countries and locations to the game.

    -

    A variety of trucks and customization options

    -

    ETS2 offers a wide range of trucks from different manufacturers, such as Mercedes-Benz, Volvo, Scania, MAN, DAF, Renault, Iveco, and more. Each truck has its own specifications, performance, handling, sound effects, and interior design. You can also customize your truck with different parts, accessories, paint jobs, decals, and more. You can also download mods that add new trucks or modify existing ones.

    -

    What is ets2.mobi?

    -

    ets2.mobi is a website that offers Euro Truck Simulator 2 for mobile devices. It allows you to download and install ETS2 on your Android or iOS phone or tablet without any hassle. You don't need to root or jailbreak your device or use any complicated software or hardware. You just need to follow a few simple steps and you will be able to enjoy ETS2 on your mobile device in no time.

    -

    How to download and install ETS2 on your phone or tablet

    -

    Downloading and installing ETS2 on your mobile device is very easy and fast. Here are the steps you need to follow:

    -

    ets2 mobile apk download
    -ets2 android gameplay
    -ets2 ios app
    -ets2 licensed trucks
    -ets2 customization options
    -ets2 advanced driving physics
    -ets2 truck driving simulator
    -ets2 official website
    -ets2 modhub
    -ets2 mods download
    -ets2 best mods
    -ets2 realistic mods
    -ets2 map mods
    -ets2 traffic mods
    -ets2 sound mods
    -ets2 graphics mods
    -ets2 tuning mods
    -ets2 trailer mods
    -ets2 skin mods
    -ets2 truck mods
    -ets2 multiplayer mod
    -ets2 online mod
    -ets2 promods
    -ets2 rusmap
    -ets2 balkans map
    -ets2 scandinavia dlc
    -ets2 going east dlc
    -ets2 vive la france dlc
    -ets2 italia dlc
    -ets2 beyond the baltic sea dlc
    -ets2 road to the black sea dlc
    -ets2 iberia dlc
    -ets2 heart of russia dlc
    -ets2 cabin accessories dlc
    -ets2 wheel tuning pack dlc
    -ets2 mighty griffin tuning pack dlc
    -ets2 heavy cargo pack dlc
    -ets2 special transport dlc
    -ets2 high power cargo pack dlc
    -ets2 krone trailer pack dlc
    -ets2 schwarzmuller trailer pack dlc
    -ets2 michelin fan pack dlc
    -ets2 goodyear tyres pack dlc
    -ets2 actros tuning pack dlc
    -ets2 fh tuning pack dlc

    -
      -
  1. Go to ets2.mobi on your mobile browser and click on the download button.
  2. Choose your device type (Android or iOS) and wait for the download to finish.
  3. Open the downloaded file and follow the instructions to install ETS2 on your device.
  4. Launch the game and enjoy playing ETS2 on your mobile device.
    -

    Note: You may need to enable unknown sources or trust the app in your device settings before installing ETS2. This is a normal procedure for installing apps from outside the official app stores and it does not harm your device or data in any way.

    -

    The features and benefits of playing ETS2 on mobile

    -

    Playing ETS2 on your mobile device has many advantages over playing it on a PC or console. Here are some of them:

    - -

    How to play ETS2 on mobile?

    -

    Playing ETS2 on your mobile device is very similar to playing it on a PC or console. You just need to learn the controls and interface of the game and you will be ready to hit the road. Here are some tips and tricks to help you get started:

    -

    The controls and interface of ETS2 on mobile

    -

    The controls and interface of ETS2 on mobile are designed to be intuitive and user-friendly. You can choose between different control modes, such as tilt, touch, or steering wheel. You can also customize the buttons, sensitivity, and layout of the controls according to your preference. You can also use voice commands to control some functions of the game, such as navigation, radio, or horn.

    -

    The interface of ETS2 on mobile consists of various elements that display important information and options for the game. You can see your speedometer, fuel gauge, damage indicator, map, GPS, mirrors, dashboard, and more. You can also access the menu, settings, profile, achievements, statistics, leaderboards, and more. You can also interact with various objects and characters in the game, such as toll booths, gas stations, rest areas, traffic lights, pedestrians, police officers, and more.

    -

    The game modes and challenges of ETS2 on mobile

    -

ETS2 on mobile offers various game modes and challenges that suit different play styles and preferences. You can choose between different difficulty levels, such as easy, normal, or hard, depending on how realistic and challenging you want the game to be. You can also choose between different game modes, such as:

- Career mode: This is the main mode of the game, where you start as a rookie driver and work your way up to become a successful trucker. You have to complete various contracts, deliver cargo, earn money, buy and upgrade trucks, hire drivers, and expand your business. You can also customize your profile, choose your preferred truck brand, and join a company of your choice.
- Free mode: This is a mode where you can drive freely across the map without any time or money constraints. You can explore different regions, visit landmarks, test different trucks, and enjoy the scenery. You can also switch between day and night, change the weather, and adjust the traffic density.
- Challenge mode: This is a mode where you can test your skills and compete with other players in various challenges, such as parking, racing, cargo delivery, fuel economy, and more. You can also create your own challenges and share them with other players online.

    The tips and tricks to enjoy ETS2 on mobile

    -

ETS2 on mobile is a fun and immersive game that can keep you entertained for hours. However, it can also be challenging and frustrating at times, especially if you are new to the game or not familiar with the controls. Here are some tips and tricks that can help you enjoy ETS2 on mobile more:

- Follow the tutorial: The game offers a tutorial that teaches you the basics of the game, such as how to drive, park, deliver cargo, use the GPS, and more. It is highly recommended that you follow the tutorial before you start playing the game, as it will help you avoid many mistakes and problems later on.
- Adjust the settings: The game allows you to adjust various settings that can affect your gameplay experience, such as graphics quality, sound volume, control mode, sensitivity, language, units, and more. You should experiment with different settings and find the ones that suit your device and preference best.
- Save frequently: The game has an autosave feature that saves your progress every time you complete a contract or enter a new city. However, it is also advisable that you manually save your game often, especially before you start a long or difficult journey. This way, you can avoid losing your progress or money if something goes wrong or if the game crashes.
- Drive carefully: The game simulates realistic driving physics and mechanics, which means that you have to drive carefully and follow the rules of the road. You have to pay attention to your speed limit, traffic signs, signals, lanes, pedestrians, and other vehicles. You also have to watch out for your fuel level, damage, fatigue, and cargo weight. If you drive recklessly or break the law, you can get fined, lose your cargo, damage your truck, or even cause accidents. You can also use the cruise control, speed limiter, and brake assist features to help you drive more smoothly and safely.
- Use the GPS: The game provides you with a GPS system that shows you the best route to your destination, the distance and time remaining, the speed limit, and the traffic conditions. You can also use the map view to see the whole map of Europe and plan your routes ahead. You can also set waypoints, zoom in and out, and switch between 2D and 3D modes. The GPS is a very useful tool that can help you navigate the roads and avoid getting lost or stuck.
- Enjoy the scenery: The game features stunning graphics and realistic sound effects that create an immersive atmosphere for the game. You can see the changing landscapes, weather, seasons, day and night cycles, and more. You can also listen to the radio, which offers various stations that play different genres of music and news. You can also use the photo mode to take pictures of your truck or the scenery and share them with other players online.

    Conclusion

    -

    Euro Truck Simulator 2 is a game that lets you experience the life of a truck driver in Europe. You can drive across different countries, deliver cargo, earn money, buy and upgrade trucks, hire drivers, and grow your own trucking company. You can also customize your truck with different parts, accessories, paint jobs, decals, and more. You can also download mods that add new trucks or modify existing ones.

    -

    ETS2 is now available for mobile devices thanks to ets2.mobi, a website that offers ETS2 for Android and iOS. You can download and install ETS2 on your phone or tablet without any hassle. You can enjoy the same graphics, gameplay, and content as the PC version of ETS2, as well as exclusive features and bonuses for mobile users. You can also connect with other players online and join multiplayer sessions, chat with them, share your progress, and more.

    -

    ETS2 is a fun and immersive game that can keep you entertained for hours. However, it can also be challenging and frustrating at times, especially if you are new to the game or not familiar with the controls. That's why we have provided you with some tips and tricks that can help you enjoy ETS2 on mobile more.

    -

    If you love driving trucks and exploring new places, then you should definitely try ETS2 on mobile today. It is a game that will make you feel like a real truck driver in Europe.

    -

    FAQs

    -

    Here are some frequently asked questions about ETS2 on mobile:

    -
      -
  1. Is ETS2 on mobile free?
     Yes, ETS2 on mobile is free to download and play. However, it may contain some in-app purchases or ads that can enhance your gameplay experience or support the developers.
  2. Is ETS2 on mobile safe?
     Yes, ETS2 on mobile is safe to download and install on your device. It does not contain any viruses or malware that can harm your device or data. However, you should always download it from ets2.mobi or other trusted sources to avoid any risks.
  3. Is ETS2 on mobile compatible with my device?
     ETS2 on mobile is compatible with most Android and iOS devices that have at least 4 GB of RAM and 3 GB of free storage space. However, some devices may have different performance or compatibility issues depending on their specifications or settings.
  4. Can I play ETS2 on mobile offline?
     Yes, you can play ETS2 on mobile offline without an internet connection. However, some features or functions may not work properly or be available offline, such as multiplayer mode, online leaderboards, updates, or downloads. You also need an internet connection to verify your game license and activate it on your device.
  5. How can I contact the developers or report a bug?
     If you have any questions, feedback, suggestions, or issues regarding ETS2 on mobile, you can contact the developers or report a bug through the following channels:
     • Email: support@ets2.mobi
     • Facebook: https://www.facebook.com/ets2mobi
     • Twitter: https://twitter.com/ets2mobi
     • Instagram: https://www.instagram.com/ets2mobi

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/2023Liu2023/bingo/src/components/chat-list.tsx b/spaces/2023Liu2023/bingo/src/components/chat-list.tsx deleted file mode 100644 index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000 --- a/spaces/2023Liu2023/bingo/src/components/chat-list.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import React from 'react' - -import { Separator } from '@/components/ui/separator' -import { ChatMessage } from '@/components/chat-message' -import { ChatMessageModel } from '@/lib/bots/bing/types' - -export interface ChatList { - messages: ChatMessageModel[] -} - -export function ChatList({ messages }: ChatList) { - if (!messages.length) { - return null - } - - return ( -
      - {messages.map((message, index) => ( - - - {index < messages.length - 1 && ( - - )} - - ))} -
      - ) -} diff --git a/spaces/360macky/first-space/app.py b/spaces/360macky/first-space/app.py deleted file mode 100644 index b178efdb6a5a27e18fec0525a278bdd2ede2b19c..0000000000000000000000000000000000000000 --- a/spaces/360macky/first-space/app.py +++ /dev/null @@ -1,5 +0,0 @@ -import streamlit as st - -x = st.slider('Select a value') -st.write(x, 'squared is', x * x) - diff --git a/spaces/4Taps/SadTalker/src/test_audio2coeff.py b/spaces/4Taps/SadTalker/src/test_audio2coeff.py deleted file mode 100644 index 3db6be3af59b0319c50106d9a92c903118f28410..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/test_audio2coeff.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -import torch -import numpy as np -from scipy.io import savemat -from yacs.config import CfgNode as CN -from scipy.signal import savgol_filter - -from src.audio2pose_models.audio2pose import Audio2Pose -from src.audio2exp_models.networks import SimpleWrapperV2 -from src.audio2exp_models.audio2exp import Audio2Exp - -def load_cpk(checkpoint_path, model=None, optimizer=None, device="cpu"): - checkpoint = torch.load(checkpoint_path, map_location=torch.device(device)) - if model is not None: - model.load_state_dict(checkpoint['model']) - if optimizer is not None: - optimizer.load_state_dict(checkpoint['optimizer']) - - return checkpoint['epoch'] - -class Audio2Coeff(): - - def __init__(self, audio2pose_checkpoint, audio2pose_yaml_path, - audio2exp_checkpoint, audio2exp_yaml_path, - wav2lip_checkpoint, device): - #load config - fcfg_pose = open(audio2pose_yaml_path) - cfg_pose = CN.load_cfg(fcfg_pose) - cfg_pose.freeze() - fcfg_exp = open(audio2exp_yaml_path) - cfg_exp = CN.load_cfg(fcfg_exp) - cfg_exp.freeze() - - # load audio2pose_model - self.audio2pose_model = Audio2Pose(cfg_pose, wav2lip_checkpoint, device=device) - self.audio2pose_model = self.audio2pose_model.to(device) - self.audio2pose_model.eval() - for param in self.audio2pose_model.parameters(): - param.requires_grad = False - try: - load_cpk(audio2pose_checkpoint, model=self.audio2pose_model, device=device) - except: - raise Exception("Failed in loading audio2pose_checkpoint") - - # load audio2exp_model - netG = SimpleWrapperV2() - netG = netG.to(device) - for param in netG.parameters(): - netG.requires_grad = False - netG.eval() - try: - load_cpk(audio2exp_checkpoint, model=netG, device=device) - except: - raise Exception("Failed in loading audio2exp_checkpoint") - self.audio2exp_model = Audio2Exp(netG, cfg_exp, device=device, prepare_training_loss=False) - self.audio2exp_model = self.audio2exp_model.to(device) - for param in self.audio2exp_model.parameters(): - param.requires_grad = False - self.audio2exp_model.eval() - - self.device = device - - def generate(self, batch, coeff_save_dir, pose_style): - - with torch.no_grad(): - #test - results_dict_exp= self.audio2exp_model.test(batch) - exp_pred = results_dict_exp['exp_coeff_pred'] #bs T 64 - - #for class_id in range(1): - #class_id = 0#(i+10)%45 - #class_id = random.randint(0,46) #46 styles can be selected - batch['class'] = torch.LongTensor([pose_style]).to(self.device) - results_dict_pose = self.audio2pose_model.test(batch) - pose_pred = results_dict_pose['pose_pred'] #bs T 6 - - pose_pred = torch.Tensor(savgol_filter(np.array(pose_pred.cpu()), 13, 2, axis=1)).to(self.device) - coeffs_pred = torch.cat((exp_pred, pose_pred), dim=-1) #bs T 70 - - coeffs_pred_numpy = coeffs_pred[0].clone().detach().cpu().numpy() - - savemat(os.path.join(coeff_save_dir, '%s##%s.mat'%(batch['pic_name'], 
batch['audio_name'])), - {'coeff_3dmm': coeffs_pred_numpy}) - - return os.path.join(coeff_save_dir, '%s##%s.mat'%(batch['pic_name'], batch['audio_name'])) - - diff --git a/spaces/801artistry/RVC801/infer/modules/uvr5/preprocess.py b/spaces/801artistry/RVC801/infer/modules/uvr5/preprocess.py deleted file mode 100644 index 19f11110ea822eeb140fb885c600536290a1adff..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/infer/modules/uvr5/preprocess.py +++ /dev/null @@ -1,346 +0,0 @@ -import os -import logging - -logger = logging.getLogger(__name__) - -import librosa -import numpy as np -import soundfile as sf -import torch - -from infer.lib.uvr5_pack.lib_v5 import nets_61968KB as Nets -from infer.lib.uvr5_pack.lib_v5 import spec_utils -from infer.lib.uvr5_pack.lib_v5.model_param_init import ModelParameters -from infer.lib.uvr5_pack.lib_v5.nets_new import CascadedNet -from infer.lib.uvr5_pack.utils import inference - - -class AudioPre: - def __init__(self, agg, model_path, device, is_half): - self.model_path = model_path - self.device = device - self.data = { - # Processing Options - "postprocess": False, - "tta": False, - # Constants - "window_size": 512, - "agg": agg, - "high_end_process": "mirroring", - } - mp = ModelParameters("infer/lib/uvr5_pack/lib_v5/modelparams/4band_v2.json") - model = Nets.CascadedASPPNet(mp.param["bins"] * 2) - cpk = torch.load(model_path, map_location="cpu") - model.load_state_dict(cpk) - model.eval() - if is_half: - model = model.half().to(device) - else: - model = model.to(device) - - self.mp = mp - self.model = model - - def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac"): - if ins_root is None and vocal_root is None: - return "No save root." - name = os.path.basename(music_file) - if ins_root is not None: - os.makedirs(ins_root, exist_ok=True) - if vocal_root is not None: - os.makedirs(vocal_root, exist_ok=True) - X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} - bands_n = len(self.mp.param["band"]) - # print(bands_n) - for d in range(bands_n, 0, -1): - bp = self.mp.param["band"][d] - if d == bands_n: # high-end band - ( - X_wave[d], - _, - ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑 - music_file, - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - if X_wave[d].ndim == 1: - X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]]) - else: # lower bands - X_wave[d] = librosa.core.resample( - X_wave[d + 1], - self.mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - # Stft of wave source - X_spec_s[d] = spec_utils.wave_to_spectrogram_mt( - X_wave[d], - bp["hl"], - bp["n_fft"], - self.mp.param["mid_side"], - self.mp.param["mid_side_b2"], - self.mp.param["reverse"], - ) - # pdb.set_trace() - if d == bands_n and self.data["high_end_process"] != "none": - input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + ( - self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"] - ) - input_high_end = X_spec_s[d][ - :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, : - ] - - X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp) - aggresive_set = float(self.data["agg"] / 100) - aggressiveness = { - "value": aggresive_set, - "split_bin": self.mp.param["band"][1]["crop_stop"], - } - with torch.no_grad(): - pred, X_mag, X_phase = inference( - X_spec_m, self.device, self.model, aggressiveness, self.data - ) - # Postprocess - if self.data["postprocess"]: - pred_inv = np.clip(X_mag - pred, 0, np.inf) - pred = 
spec_utils.mask_silence(pred, pred_inv) - y_spec_m = pred * X_phase - v_spec_m = X_spec_m - y_spec_m - - if ins_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], y_spec_m, input_high_end, self.mp - ) - wav_instrument = spec_utils.cmb_spectrogram_to_wave( - y_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) - logger.info("%s instruments done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - ins_root, - "instrument_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) # - else: - path = os.path.join( - ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) - if vocal_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], v_spec_m, input_high_end, self.mp - ) - wav_vocals = spec_utils.cmb_spectrogram_to_wave( - v_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp) - logger.info("%s vocals done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - vocal_root, - "vocal_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - else: - path = os.path.join( - vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) - - -class AudioPreDeEcho: - def __init__(self, agg, model_path, device, is_half): - self.model_path = model_path - self.device = device - self.data = { - # Processing Options - "postprocess": False, - "tta": False, - # Constants - "window_size": 512, - "agg": agg, - "high_end_process": "mirroring", - } - mp = ModelParameters("infer/lib/uvr5_pack/lib_v5/modelparams/4band_v3.json") - nout = 64 if "DeReverb" in model_path else 48 - model = CascadedNet(mp.param["bins"] * 2, nout) - cpk = torch.load(model_path, map_location="cpu") - model.load_state_dict(cpk) - model.eval() - if is_half: - model = model.half().to(device) - else: - model = model.to(device) - - self.mp = mp - self.model = model - - def _path_audio_( - self, music_file, vocal_root=None, ins_root=None, format="flac" - ): # 3个VR模型vocal和ins是反的 - if ins_root is None and vocal_root is None: - return "No save root." 
- name = os.path.basename(music_file) - if ins_root is not None: - os.makedirs(ins_root, exist_ok=True) - if vocal_root is not None: - os.makedirs(vocal_root, exist_ok=True) - X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} - bands_n = len(self.mp.param["band"]) - # print(bands_n) - for d in range(bands_n, 0, -1): - bp = self.mp.param["band"][d] - if d == bands_n: # high-end band - ( - X_wave[d], - _, - ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑 - music_file, - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - if X_wave[d].ndim == 1: - X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]]) - else: # lower bands - X_wave[d] = librosa.core.resample( - X_wave[d + 1], - self.mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - # Stft of wave source - X_spec_s[d] = spec_utils.wave_to_spectrogram_mt( - X_wave[d], - bp["hl"], - bp["n_fft"], - self.mp.param["mid_side"], - self.mp.param["mid_side_b2"], - self.mp.param["reverse"], - ) - # pdb.set_trace() - if d == bands_n and self.data["high_end_process"] != "none": - input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + ( - self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"] - ) - input_high_end = X_spec_s[d][ - :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, : - ] - - X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp) - aggresive_set = float(self.data["agg"] / 100) - aggressiveness = { - "value": aggresive_set, - "split_bin": self.mp.param["band"][1]["crop_stop"], - } - with torch.no_grad(): - pred, X_mag, X_phase = inference( - X_spec_m, self.device, self.model, aggressiveness, self.data - ) - # Postprocess - if self.data["postprocess"]: - pred_inv = np.clip(X_mag - pred, 0, np.inf) - pred = spec_utils.mask_silence(pred, pred_inv) - y_spec_m = pred * X_phase - v_spec_m = X_spec_m - y_spec_m - - if ins_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], y_spec_m, input_high_end, self.mp - ) - wav_instrument = spec_utils.cmb_spectrogram_to_wave( - y_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) - logger.info("%s instruments done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - ins_root, - "instrument_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) # - else: - path = os.path.join( - ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) - if vocal_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], v_spec_m, input_high_end, self.mp - ) - wav_vocals = spec_utils.cmb_spectrogram_to_wave( - v_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp) - logger.info("%s vocals done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - vocal_root, - "vocal_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - else: - path = os.path.join( - vocal_root, 
"vocal_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) diff --git a/spaces/AIFILMS/generate_human_motion/pyrender/examples/duck.py b/spaces/AIFILMS/generate_human_motion/pyrender/examples/duck.py deleted file mode 100644 index 9a94bad5bfb30493f7364f2e52cbb4badbccb2c7..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/generate_human_motion/pyrender/examples/duck.py +++ /dev/null @@ -1,13 +0,0 @@ -from pyrender import Mesh, Scene, Viewer -from io import BytesIO -import numpy as np -import trimesh -import requests - -duck_source = "https://github.com/KhronosGroup/glTF-Sample-Models/raw/master/2.0/Duck/glTF-Binary/Duck.glb" - -duck = trimesh.load(BytesIO(requests.get(duck_source).content), file_type='glb') -duckmesh = Mesh.from_trimesh(list(duck.geometry.values())[0]) -scene = Scene(ambient_light=np.array([1.0, 1.0, 1.0, 1.0])) -scene.add(duckmesh) -Viewer(scene) diff --git a/spaces/AIZerotoHero-Health4All/01-Gradio-Speech2Text2Speech-AIPipeline/README.md b/spaces/AIZerotoHero-Health4All/01-Gradio-Speech2Text2Speech-AIPipeline/README.md deleted file mode 100644 index b98c0cb21bcd18f4bbec2f622d0aa58000bffc8b..0000000000000000000000000000000000000000 --- a/spaces/AIZerotoHero-Health4All/01-Gradio-Speech2Text2Speech-AIPipeline/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 01🗣️ Gradio NLP Speech 2 Text 2 Speech Generator AI Pipeline 🙉 -emoji: 🗣️🎤🙉 -colorFrom: blue -colorTo: indigo -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet50_label_smooth.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet50_label_smooth.py deleted file mode 100644 index b6f793751904658b3e7e01a5ffdaa6b86e156e66..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/_base_/models/resnet50_label_smooth.py +++ /dev/null @@ -1,18 +0,0 @@ -# model settings -model = dict( - type='ImageClassifier', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(3, ), - style='pytorch'), - neck=dict(type='GlobalAveragePooling'), - head=dict( - type='LinearClsHead', - num_classes=1000, - in_channels=2048, - loss=dict( - type='LabelSmoothLoss', label_smooth_val=0.1, loss_weight=1.0), - topk=(1, 5), - )) diff --git a/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/utils/utils.py b/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/utils/utils.py deleted file mode 100644 index 86e1448d065fa182ca69aae00d2f2a7eea55d8a4..0000000000000000000000000000000000000000 --- a/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/utils/utils.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -from concurrent.futures import ProcessPoolExecutor -from functools import wraps -import hashlib -import logging -import typing as tp - -import flashy -import flashy.distrib -import omegaconf -import torch -from torch.nn.utils.rnn import pad_sequence - - -logger = logging.getLogger(__name__) - - -def dict_from_config(cfg: omegaconf.DictConfig) -> dict: - """Convenience function to map an omegaconf configuration to a dictionary. - - Args: - cfg (omegaconf.DictConfig): Original configuration to map to dict. - Returns: - dict: Config as dictionary object. - """ - dct = omegaconf.OmegaConf.to_container(cfg, resolve=True) - assert isinstance(dct, dict) - return dct - - -def random_subset(dataset, max_samples: int, seed: int = 42) -> torch.utils.data.Subset: - if max_samples >= len(dataset): - return dataset - - generator = torch.Generator().manual_seed(seed) - perm = torch.randperm(len(dataset), generator=generator) - return torch.utils.data.Subset(dataset, perm[:max_samples].tolist()) - - -def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int, - num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader: - """Convenience function to load dataset into a dataloader with optional subset sampling. - - Args: - dataset: Dataset to load. - num_samples (Optional[int]): Number of samples to limit subset size. - batch_size (int): Batch size. - num_workers (int): Number of workers for data loading. - seed (int): Random seed. - """ - if num_samples is not None: - dataset = random_subset(dataset, num_samples, seed) - - dataloader = flashy.distrib.loader( - dataset, - batch_size=batch_size, - num_workers=num_workers, - **kwargs - ) - return dataloader - - -def get_dataset_from_loader(dataloader): - dataset = dataloader.dataset - if isinstance(dataset, torch.utils.data.Subset): - return dataset.dataset - else: - return dataset - - -def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None): - """torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension. - - Args: - input (torch.Tensor): The input tensor containing probabilities. - num_samples (int): Number of samples to draw. - replacement (bool): Whether to draw with replacement or not. - Keywords args: - generator (torch.Generator): A pseudorandom number generator for sampling. - Returns: - torch.Tensor: Last dimension contains num_samples indices - sampled from the multinomial probability distribution - located in the last dimension of tensor input. - """ - input_ = input.reshape(-1, input.shape[-1]) - output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator) - output = output_.reshape(*list(input.shape[:-1]), -1) - return output - - -def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor: - """Sample next token from top K values along the last dimension of the input probs tensor. - - Args: - probs (torch.Tensor): Input probabilities with token candidates on the last dimension. - k (int): The k in “top-k”. - Returns: - torch.Tensor: Sampled tokens. - """ - top_k_value, _ = torch.topk(probs, k, dim=-1) - min_value_top_k = top_k_value[..., [-1]] - probs *= (probs >= min_value_top_k).float() - probs.div_(probs.sum(dim=-1, keepdim=True)) - next_token = multinomial(probs, num_samples=1) - return next_token - - -def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor: - """Sample next token from top P probabilities along the last dimension of the input probs tensor. 
- - Args: - probs (torch.Tensor): Input probabilities with token candidates on the last dimension. - p (int): The p in “top-p”. - Returns: - torch.Tensor: Sampled tokens. - """ - probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True) - probs_sum = torch.cumsum(probs_sort, dim=-1) - mask = probs_sum - probs_sort > p - probs_sort *= (~mask).float() - probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True)) - next_token = multinomial(probs_sort, num_samples=1) - next_token = torch.gather(probs_idx, -1, next_token) - return next_token - - -class DummyPoolExecutor: - """Dummy pool executor to use when we actually have only 1 worker. - (e.g. instead of ProcessPoolExecutor). - """ - class DummyResult: - def __init__(self, func, *args, **kwargs): - self.func = func - self.args = args - self.kwargs = kwargs - - def result(self): - return self.func(*self.args, **self.kwargs) - - def __init__(self, workers, mp_context=None): - pass - - def submit(self, func, *args, **kwargs): - return DummyPoolExecutor.DummyResult(func, *args, **kwargs) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - return - - -def get_pool_executor(num_workers: int, mp_context=None): - return ProcessPoolExecutor(num_workers, mp_context) if num_workers > 1 else DummyPoolExecutor(1) - - -def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor: - """Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences). - For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]] - - Args: - lengths (torch.Tensor): tensor with lengths - max_len (int): can set the max length manually. Defaults to None. - Returns: - torch.Tensor: mask with 0s where there is pad tokens else 1s - """ - assert len(lengths.shape) == 1, "Length shape should be 1 dimensional." - final_length = lengths.max().item() if not max_len else max_len - final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor - return torch.arange(final_length)[None, :].to(lengths.device) < lengths[:, None] - - -def hash_trick(word: str, vocab_size: int) -> int: - """Hash trick to pair each word with an index - - Args: - word (str): word we wish to convert to an index - vocab_size (int): size of the vocabulary - Returns: - int: index of the word in the embedding LUT - """ - hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16) - return hash % vocab_size - - -def with_rank_rng(base_seed: int = 1234): - """Decorator for a function so that the function will use a Random Number Generator - whose state depend on the GPU rank. The original RNG state is restored upon returning. - - Args: - base_seed (int): Random seed. - """ - def _decorator(fun: tp.Callable): - @wraps(fun) - def _decorated(*args, **kwargs): - state = torch.get_rng_state() - seed = base_seed ^ flashy.distrib.rank() - torch.manual_seed(seed) - logger.debug('Rank dependent seed set to %d', seed) - try: - return fun(*args, **kwargs) - finally: - torch.set_rng_state(state) - logger.debug('RNG state restored.') - return _decorated - return _decorator - - -def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]: - """Get a list of tensors and collate them to a single tensor. according to the following logic: - - `dim` specifies the time dimension which will be stacked and padded. - - The output will contain 1 new dimension (dimension index 0) which will be the size of - of the original list. 
- - Args: - tensors (tp.List[torch.Tensor]): List of tensors to collate. - dim (int): Dimension which will be stacked and padded. - Returns: - tp.Tuple[torch.Tensor, torch.Tensor]: - torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension - (dimension index 0) which will be the size of the original list. - torch.Tensor: Tensor containing length of original tensor sizes (without padding). - """ - tensors = [x.transpose(0, dim) for x in tensors] - lens = torch.LongTensor([len(x) for x in tensors]) - padded_tensors = pad_sequence(tensors) - padded_tensors = padded_tensors.transpose(0, 1) - padded_tensors = padded_tensors.transpose(1, dim + 1) - return padded_tensors, lens diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/FreeGpt.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/FreeGpt.py deleted file mode 100644 index 73b8acea41994a4e740791f66d90241fcc5da747..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/FreeGpt.py +++ /dev/null @@ -1,55 +0,0 @@ -from __future__ import annotations - -import time, hashlib, random - -from ..typing import AsyncGenerator -from ..requests import StreamSession -from .base_provider import AsyncGeneratorProvider - -domains = [ - 'https://k.aifree.site', - 'https://p.aifree.site' -] - -class FreeGpt(AsyncGeneratorProvider): - url = "https://freegpts1.aifree.site/" - supports_gpt_35_turbo = True - working = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - timeout: int = 30, - **kwargs - ) -> AsyncGenerator: - async with StreamSession(impersonate="chrome107", timeout=timeout) as session: - prompt = messages[-1]["content"] - timestamp = int(time.time()) - data = { - "messages": messages, - "time": timestamp, - "pass": None, - "sign": generate_signature(timestamp, prompt) - } - url = random.choice(domains) - async with session.post(f"{url}/api/generate", json=data) as response: - response.raise_for_status() - async for chunk in response.iter_content(): - yield chunk.decode() - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" - -def generate_signature(timestamp: int, message: str, secret: str = ""): - data = f"{timestamp}:{message}:{secret}" - return hashlib.sha256(data.encode()).hexdigest() \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/.github/CONTRIBUTING.md b/spaces/AgentVerse/agentVerse/ui/.github/CONTRIBUTING.md deleted file mode 100644 index 74ce28264311247c50cdeb119e93ad31b7b2799f..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/.github/CONTRIBUTING.md +++ /dev/null @@ -1,80 +0,0 @@ -# How to contribute - -It's important to us that you feel you can contribute towards the evolution of Phaser. This can take many forms: from helping to fix bugs or improve the docs, to adding in new features to the source. This guide should help you in making that process as smooth as possible. - -Before contributing, please read the [code of conduct](https://github.com/photonstorm/phaser/blob/master/.github/CODE_OF_CONDUCT.md). - -## Reporting issues - -[GitHub Issues][0] is the place to report bugs you may have found. When submitting a bug please do the following: - -**1. 
Search for existing issues.** Your bug may have already been fixed, or cannot, or will not, be fixed. So be sure to search the issues first before putting in a duplicate issue. - -**2. Not sure if it's a bug?.** Please ask on the [forum][4]. If something is blatantly wrong then post it to GitHub. But if you feel it might just be because you're not sure of expected behavior, then it might save us time, and get you a response faster, if you post it to the Phaser forum instead. - -**3. Create an isolated and reproducible test case.** If you are reporting a bug, make sure you also have a minimal, runnable, code example that reproduces the problem you have. - -**4. Include a live example.** After narrowing your code down to only the problem areas, make use of [jsFiddle][1], [jsBin][2], [CodePen][5], or a link to your live site so that we can view a live example of the problem. - -**5. Share as much information as possible.** Include browser version affected, your OS, version of the library, steps to reproduce, etc. "X isn't working!!!1!" will probably just be closed. - -## Support Forum - -We have a very active [Phaser Support Forum][4]. If you need general support, or are struggling to understand how to do something or need your code checked over, then we would urge you to post it to our forum. There are a lot of friendly devs in there who can help, as well as the core Phaser team, so it's a great place to get support. You're welcome to report bugs directly on GitHub, but for general support we'd always recommend using the forum first. - -## Making Changes - -I'm assuming you already have a recent version of [Node](https://nodejs.org) installed locally and can run `npm`. This guide is tested and works on both Windows 10 and OS X. - -### 1. Checkout the repos - -Check-out both the [Phaser repo](https://github.com/photonstorm/phaser) and the [Phaser 3 Examples Repo](https://github.com/photonstorm/phaser3-examples). Make sure the Phaser 3 Examples repo is saved locally in a folder called `phaser3-examples`, which will be the default for most Git clients. - -### 2. Matching Directory Levels - -Ensure that both repos live at the same depth in your directory structure. For example: `/usr/home/web/phaser` and `/usr/home/web/phaser3-examples`. This is so the dev build scripts in the Phaser repo can safely copy files to `../phaser3-examples` and have them end up in the correct place. - -### 3. Install dependencies - -Using your console, run `npm install` or `yarn install` as we've configs for both. This process will install a local copy of webpack and a handful of small support scripts. Note that Yarn on Windows seems to have issues making some packages global, so stick with npm if this is the case. - -### 4. Webpack - -Making sure you've got both repos checked out, and at the same directory level in your filesystem, issue the command `webpack`. If you can't issue the command then webpack may need [installing globally](https://webpack.js.org/guides/installation/). Webpack will build Phaser and if there are any path errors in the code they'll be flagged during the build process. - -What you need is the ability to issue the command `webpack` within the v3 folder and have it work. - -### 5. ESLint - -There is an ESLint configuration and an Editor Configuration in the v3 folder. **Please adhere to them!** Although not enforced in the build process yet, I will be adding that at a later point. There are lots of tools you can install so your editor of choice will check the ESLint config during development. 
- -To test if your code passes our lint config issue the command `npm run lint`. - -## Coding style preferences are not contributions - -If your PR is doing little more than changing the Phaser source code into a format / coding style that you prefer then we will automatically close it. All PRs must adhere to the coding style already set-out across the thousands of lines of code in Phaser. Your personal preferences for how things should "look" or be structured do not apply here, sorry. PRs should fix bugs, fix documentation or add features. No changes for the sake of change. - -## I don't really like git / node.js, but I can fix this bug - -That is fine too. While Pull Requests are the best thing in the world for us, they are not the only way to help. You're welcome to post fixes to our forum or even just email them to us. All we ask is that you still adhere to the guidelines presented here re: ESLint, etc. - -## Code Style Guide - -We provide an .editorconfig and eslint config for you to use, but generally: - -- Use 4 spaces for tabs, never tab characters. - -- No trailing whitespace, blank lines should have no whitespace. - -- Always favor strict equals `===` unless you *need* to use type coercion. - -- Follow conventions already in the code, and listen to eslint. Our config is set-up for a reason. - -Thanks to Chad for creating the original Pixi.js Contributing file which we adapted for Phaser. - -[0]: https://github.com/photonstorm/phaser/issues -[1]: http://jsfiddle.net -[2]: http://jsbin.com/ -[3]: http://nodejs.org -[4]: https://phaser.discourse.group/ -[5]: https://codepen.io/pen?template=YeEWom "Phaser 3 game template" diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ball/Ball.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ball/Ball.js deleted file mode 100644 index 1029e82aa2b7b4cb1022b01b94c799c5e7baa8cb..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/ball/Ball.js +++ /dev/null @@ -1,45 +0,0 @@ -import Base from '../base/Base.js'; -import { Circle } from '../utils/Geoms.js' -import Yoyo from '../utils/Yoyo.js'; - -const Linear = Phaser.Math.Linear; - -class Ball extends Base { - constructor(scene, config) { - super(scene, config); - this.type = 'rexSpinnerBall'; - } - - buildShapes() { - for (var i = 0; i < 3; i++) { - this.addShape(new Circle()); - } - } - - updateShapes() { - var centerX = this.centerX; - var centerY = this.centerY; - var radius = this.radius; - var ballRadius = radius * 0.1; - var lineWidth = Math.ceil(ballRadius * 0.25); - - var t = 1 - Yoyo(this.value); - var trackRadius = Linear(0.3, 0.9, t) * radius; - - var shapes = this.getShapes(); - for (var i = 0, cnt = shapes.length; i < cnt; i++) { - var ball = shapes[i]; - var t = (this.value + (i / cnt)) % 1; - var angle = Math.PI * 2 * t; - ball - .lineStyle(lineWidth, this.color) - .setRadius(ballRadius) - .setCenterPosition( - centerX + Math.cos(angle) * trackRadius, - centerY + Math.sin(angle) * trackRadius - ); - } - } -} - -export default Ball; \ No newline at end of file diff --git a/spaces/Alpaca233/SadTalker/src/generate_facerender_batch.py b/spaces/Alpaca233/SadTalker/src/generate_facerender_batch.py deleted file mode 100644 index a62b6edffa41529ba828905fb86ca302a01d37cc..0000000000000000000000000000000000000000 --- a/spaces/Alpaca233/SadTalker/src/generate_facerender_batch.py +++ /dev/null @@ -1,136 +0,0 @@ -import os -import numpy as np -from PIL import Image 
-from skimage import io, img_as_float32, transform -import torch -import scipy.io as scio - -def get_facerender_data(coeff_path, pic_path, first_coeff_path, audio_path, - batch_size, input_yaw_list=None, input_pitch_list=None, input_roll_list=None, - expression_scale=1.0, still_mode = False, preprocess='crop', size = 256): - - semantic_radius = 13 - video_name = os.path.splitext(os.path.split(coeff_path)[-1])[0] - txt_path = os.path.splitext(coeff_path)[0] - - data={} - - img1 = Image.open(pic_path) - source_image = np.array(img1) - source_image = img_as_float32(source_image) - source_image = transform.resize(source_image, (size, size, 3)) - source_image = source_image.transpose((2, 0, 1)) - source_image_ts = torch.FloatTensor(source_image).unsqueeze(0) - source_image_ts = source_image_ts.repeat(batch_size, 1, 1, 1) - data['source_image'] = source_image_ts - - source_semantics_dict = scio.loadmat(first_coeff_path) - generated_dict = scio.loadmat(coeff_path) - - if 'full' not in preprocess.lower(): - source_semantics = source_semantics_dict['coeff_3dmm'][:1,:70] #1 70 - generated_3dmm = generated_dict['coeff_3dmm'][:,:70] - - else: - source_semantics = source_semantics_dict['coeff_3dmm'][:1,:73] #1 70 - generated_3dmm = generated_dict['coeff_3dmm'][:,:70] - - source_semantics_new = transform_semantic_1(source_semantics, semantic_radius) - source_semantics_ts = torch.FloatTensor(source_semantics_new).unsqueeze(0) - source_semantics_ts = source_semantics_ts.repeat(batch_size, 1, 1) - data['source_semantics'] = source_semantics_ts - - # target - generated_3dmm[:, :64] = generated_3dmm[:, :64] * expression_scale - - if 'full' in preprocess.lower(): - generated_3dmm = np.concatenate([generated_3dmm, np.repeat(source_semantics[:,70:], generated_3dmm.shape[0], axis=0)], axis=1) - - if still_mode: - generated_3dmm[:, 64:] = np.repeat(source_semantics[:, 64:], generated_3dmm.shape[0], axis=0) - - with open(txt_path+'.txt', 'w') as f: - for coeff in generated_3dmm: - for i in coeff: - f.write(str(i)[:7] + ' '+'\t') - f.write('\n') - - target_semantics_list = [] - frame_num = generated_3dmm.shape[0] - data['frame_num'] = frame_num - for frame_idx in range(frame_num): - target_semantics = transform_semantic_target(generated_3dmm, frame_idx, semantic_radius) - target_semantics_list.append(target_semantics) - - remainder = frame_num%batch_size - if remainder!=0: - for _ in range(batch_size-remainder): - target_semantics_list.append(target_semantics) - - target_semantics_np = np.array(target_semantics_list) #frame_num 70 semantic_radius*2+1 - target_semantics_np = target_semantics_np.reshape(batch_size, -1, target_semantics_np.shape[-2], target_semantics_np.shape[-1]) - data['target_semantics_list'] = torch.FloatTensor(target_semantics_np) - data['video_name'] = video_name - data['audio_path'] = audio_path - - if input_yaw_list is not None: - yaw_c_seq = gen_camera_pose(input_yaw_list, frame_num, batch_size) - data['yaw_c_seq'] = torch.FloatTensor(yaw_c_seq) - if input_pitch_list is not None: - pitch_c_seq = gen_camera_pose(input_pitch_list, frame_num, batch_size) - data['pitch_c_seq'] = torch.FloatTensor(pitch_c_seq) - if input_roll_list is not None: - roll_c_seq = gen_camera_pose(input_roll_list, frame_num, batch_size) - data['roll_c_seq'] = torch.FloatTensor(roll_c_seq) - - return data - -def transform_semantic_1(semantic, semantic_radius): - semantic_list = [semantic for i in range(0, semantic_radius*2+1)] - coeff_3dmm = np.concatenate(semantic_list, 0) - return coeff_3dmm.transpose(1,0) - -def 
transform_semantic_target(coeff_3dmm, frame_index, semantic_radius): - num_frames = coeff_3dmm.shape[0] - seq = list(range(frame_index- semantic_radius, frame_index + semantic_radius+1)) - index = [ min(max(item, 0), num_frames-1) for item in seq ] - coeff_3dmm_g = coeff_3dmm[index, :] - return coeff_3dmm_g.transpose(1,0) - -def gen_camera_pose(camera_degree_list, frame_num, batch_size): - - new_degree_list = [] - if len(camera_degree_list) == 1: - for _ in range(frame_num): - new_degree_list.append(camera_degree_list[0]) - remainder = frame_num%batch_size - if remainder!=0: - for _ in range(batch_size-remainder): - new_degree_list.append(new_degree_list[-1]) - new_degree_np = np.array(new_degree_list).reshape(batch_size, -1) - return new_degree_np - - degree_sum = 0. - for i, degree in enumerate(camera_degree_list[1:]): - degree_sum += abs(degree-camera_degree_list[i]) - - degree_per_frame = degree_sum/(frame_num-1) - for i, degree in enumerate(camera_degree_list[1:]): - degree_last = camera_degree_list[i] - degree_step = degree_per_frame * abs(degree-degree_last)/(degree-degree_last) - new_degree_list = new_degree_list + list(np.arange(degree_last, degree, degree_step)) - if len(new_degree_list) > frame_num: - new_degree_list = new_degree_list[:frame_num] - elif len(new_degree_list) < frame_num: - for _ in range(frame_num-len(new_degree_list)): - new_degree_list.append(new_degree_list[-1]) - print(len(new_degree_list)) - print(frame_num) - - remainder = frame_num%batch_size - if remainder!=0: - for _ in range(batch_size-remainder): - new_degree_list.append(new_degree_list[-1]) - new_degree_np = np.array(new_degree_list).reshape(batch_size, -1) - return new_degree_np - diff --git a/spaces/Amrrs/DragGan-Inversion/training/networks_stylegan2.py b/spaces/Amrrs/DragGan-Inversion/training/networks_stylegan2.py deleted file mode 100644 index 6f570aad058ae63aaaa6733504d0d5ed4ba190a1..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/training/networks_stylegan2.py +++ /dev/null @@ -1,981 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Network architectures from the paper -"Analyzing and Improving the Image Quality of StyleGAN". -Matches the original implementation of configs E-F by Karras et al. at -https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py""" - -import numpy as np -import torch -import torch.nn.functional as F -from torch_utils import misc -from torch_utils import persistence -from torch_utils.ops import conv2d_resample -from torch_utils.ops import upfirdn2d -from torch_utils.ops import bias_act -from torch_utils.ops import fma - -# ---------------------------------------------------------------------------- - - -@misc.profiled_function -def normalize_2nd_moment(x, dim=1, eps=1e-8): - return x * (x.square().mean(dim=dim, keepdim=True) + eps).rsqrt() - -# ---------------------------------------------------------------------------- - - -@misc.profiled_function -def modulated_conv2d( - # Input tensor of shape [batch_size, in_channels, in_height, in_width]. 
- x, - # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width]. - weight, - # Modulation coefficients of shape [batch_size, in_channels]. - styles, - noise=None, # Optional noise tensor to add to the output activations. - up=1, # Integer upsampling factor. - down=1, # Integer downsampling factor. - padding=0, # Padding with respect to the upsampled image. - # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter(). - resample_filter=None, - demodulate=True, # Apply weight demodulation? - # False = convolution, True = correlation (matches torch.nn.functional.conv2d). - flip_weight=True, - # Perform modulation, convolution, and demodulation as a single fused operation? - fused_modconv=True, -): - batch_size = x.shape[0] - out_channels, in_channels, kh, kw = weight.shape - misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk] - misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW] - misc.assert_shape(styles, [batch_size, in_channels]) # [NI] - - # Pre-normalize inputs to avoid FP16 overflow. - if x.dtype == torch.float16 and demodulate: - weight = weight * (1 / np.sqrt(in_channels * kh * kw) / - weight.norm(float('inf'), dim=[1, 2, 3], keepdim=True)) # max_Ikk - styles = styles / \ - styles.norm(float('inf'), dim=1, keepdim=True) # max_I - - # Calculate per-sample weights and demodulation coefficients. - w = None - dcoefs = None - if demodulate or fused_modconv: - w = weight.unsqueeze(0) # [NOIkk] - w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk] - if demodulate: - dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt() # [NO] - if demodulate and fused_modconv: - w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk] - - # Execute by scaling the activations before and after the convolution. - if not fused_modconv: - x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1) - x = conv2d_resample.conv2d_resample(x=x, w=weight.to( - x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight) - if demodulate and noise is not None: - x = fma.fma(x, dcoefs.to(x.dtype).reshape( - batch_size, -1, 1, 1), noise.to(x.dtype)) - elif demodulate: - x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1) - elif noise is not None: - x = x.add_(noise.to(x.dtype)) - return x - - # Execute as one fused op using grouped convolution. - with misc.suppress_tracer_warnings(): # this value will be treated as a constant - batch_size = int(batch_size) - misc.assert_shape(x, [batch_size, in_channels, None, None]) - x = x.reshape(1, -1, *x.shape[2:]) - w = w.reshape(-1, in_channels, kh, kw) - x = conv2d_resample.conv2d_resample(x=x, w=w.to( - x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight) - x = x.reshape(batch_size, -1, *x.shape[2:]) - if noise is not None: - x = x.add_(noise) - return x - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class FullyConnectedLayer(torch.nn.Module): - def __init__(self, - in_features, # Number of input features. - out_features, # Number of output features. - bias=True, # Apply additive bias before the activation function? - # Activation function: 'relu', 'lrelu', etc. - activation='linear', - lr_multiplier=1, # Learning rate multiplier. - bias_init=0, # Initial value for the additive bias. 
- ): - super().__init__() - self.in_features = in_features - self.out_features = out_features - self.activation = activation - self.weight = torch.nn.Parameter(torch.randn( - [out_features, in_features]) / lr_multiplier) - self.bias = torch.nn.Parameter(torch.full( - [out_features], np.float32(bias_init))) if bias else None - self.weight_gain = lr_multiplier / np.sqrt(in_features) - self.bias_gain = lr_multiplier - - def forward(self, x): - w = self.weight.to(x.dtype) * self.weight_gain - b = self.bias - if b is not None: - b = b.to(x.dtype) - if self.bias_gain != 1: - b = b * self.bias_gain - - if self.activation == 'linear' and b is not None: - x = torch.addmm(b.unsqueeze(0), x, w.t()) - else: - x = x.matmul(w.t()) - x = bias_act.bias_act(x, b, act=self.activation) - return x - - def extra_repr(self): - return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}' - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class Conv2dLayer(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels. - out_channels, # Number of output channels. - # Width and height of the convolution kernel. - kernel_size, - bias=True, # Apply additive bias before the activation function? - # Activation function: 'relu', 'lrelu', etc. - activation='linear', - up=1, # Integer upsampling factor. - down=1, # Integer downsampling factor. - # Low-pass filter to apply when resampling activations. - resample_filter=[1, 3, 3, 1], - # Clamp the output to +-X, None = disable clamping. - conv_clamp=None, - channels_last=False, # Expect the input to have memory_format=channels_last? - trainable=True, # Update the weights of this layer during training? 
- ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.activation = activation - self.up = up - self.down = down - self.conv_clamp = conv_clamp - self.register_buffer( - 'resample_filter', upfirdn2d.setup_filter(resample_filter)) - self.padding = kernel_size // 2 - self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2)) - self.act_gain = bias_act.activation_funcs[activation].def_gain - - memory_format = torch.channels_last if channels_last else torch.contiguous_format - weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to( - memory_format=memory_format) - bias = torch.zeros([out_channels]) if bias else None - if trainable: - self.weight = torch.nn.Parameter(weight) - self.bias = torch.nn.Parameter(bias) if bias is not None else None - else: - self.register_buffer('weight', weight) - if bias is not None: - self.register_buffer('bias', bias) - else: - self.bias = None - - def forward(self, x, gain=1): - w = self.weight * self.weight_gain - b = self.bias.to(x.dtype) if self.bias is not None else None - flip_weight = (self.up == 1) # slightly faster - x = conv2d_resample.conv2d_resample(x=x, w=w.to( - x.dtype), f=self.resample_filter, up=self.up, down=self.down, padding=self.padding, flip_weight=flip_weight) - - act_gain = self.act_gain * gain - act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None - x = bias_act.bias_act(x, b, act=self.activation, - gain=act_gain, clamp=act_clamp) - return x - - def extra_repr(self): - return ' '.join([ - f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},', - f'up={self.up}, down={self.down}']) - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class MappingNetwork(torch.nn.Module): - def __init__(self, - # Input latent (Z) dimensionality, 0 = no latent. - z_dim, - # Conditioning label (C) dimensionality, 0 = no label. - c_dim, - # Intermediate latent (W) dimensionality. - w_dim, - # Number of intermediate latents to output, None = do not broadcast. - num_ws, - num_layers=8, # Number of mapping layers. - # Label embedding dimensionality, None = same as w_dim. - embed_features=None, - # Number of intermediate features in the mapping layers, None = same as w_dim. - layer_features=None, - # Activation function: 'relu', 'lrelu', etc. - activation='lrelu', - # Learning rate multiplier for the mapping layers. - lr_multiplier=0.01, - # Decay for tracking the moving average of W during training, None = do not track. 
- w_avg_beta=0.998, - ): - super().__init__() - self.z_dim = z_dim - self.c_dim = c_dim - self.w_dim = w_dim - self.num_ws = num_ws - self.num_layers = num_layers - self.w_avg_beta = w_avg_beta - - if embed_features is None: - embed_features = w_dim - if c_dim == 0: - embed_features = 0 - if layer_features is None: - layer_features = w_dim - features_list = [z_dim + embed_features] + \ - [layer_features] * (num_layers - 1) + [w_dim] - - if c_dim > 0: - self.embed = FullyConnectedLayer(c_dim, embed_features) - for idx in range(num_layers): - in_features = features_list[idx] - out_features = features_list[idx + 1] - layer = FullyConnectedLayer( - in_features, out_features, activation=activation, lr_multiplier=lr_multiplier) - setattr(self, f'fc{idx}', layer) - - if num_ws is not None and w_avg_beta is not None: - self.register_buffer('w_avg', torch.zeros([w_dim])) - - def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False): - # Embed, normalize, and concat inputs. - x = None - with torch.autograd.profiler.record_function('input'): - if self.z_dim > 0: - misc.assert_shape(z, [None, self.z_dim]) - x = normalize_2nd_moment(z.to(torch.float32)) - if self.c_dim > 0: - misc.assert_shape(c, [None, self.c_dim]) - y = normalize_2nd_moment(self.embed(c.to(torch.float32))) - x = torch.cat([x, y], dim=1) if x is not None else y - - # Main layers. - for idx in range(self.num_layers): - layer = getattr(self, f'fc{idx}') - x = layer(x) - - # Update moving average of W. - if update_emas and self.w_avg_beta is not None: - with torch.autograd.profiler.record_function('update_w_avg'): - self.w_avg.copy_(x.detach().mean( - dim=0).lerp(self.w_avg, self.w_avg_beta)) - - # Broadcast. - if self.num_ws is not None: - with torch.autograd.profiler.record_function('broadcast'): - x = x.unsqueeze(1).repeat([1, self.num_ws, 1]) - - # Apply truncation. - if truncation_psi != 1: - with torch.autograd.profiler.record_function('truncate'): - assert self.w_avg_beta is not None - if self.num_ws is None or truncation_cutoff is None: - x = self.w_avg.lerp(x, truncation_psi) - else: - x[:, :truncation_cutoff] = self.w_avg.lerp( - x[:, :truncation_cutoff], truncation_psi) - return x - - def extra_repr(self): - return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}' - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class SynthesisLayer(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels. - out_channels, # Number of output channels. - # Intermediate latent (W) dimensionality. - w_dim, - resolution, # Resolution of this layer. - kernel_size=3, # Convolution kernel size. - up=1, # Integer upsampling factor. - use_noise=True, # Enable noise input? - # Activation function: 'relu', 'lrelu', etc. - activation='lrelu', - # Low-pass filter to apply when resampling activations. - resample_filter=[1, 3, 3, 1], - # Clamp the output of convolution layers to +-X, None = disable clamping. - conv_clamp=None, - channels_last=False, # Use channels_last format for the weights? 
- ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.w_dim = w_dim - self.resolution = resolution - self.up = up - self.use_noise = use_noise - self.activation = activation - self.conv_clamp = conv_clamp - self.register_buffer( - 'resample_filter', upfirdn2d.setup_filter(resample_filter)) - self.padding = kernel_size // 2 - self.act_gain = bias_act.activation_funcs[activation].def_gain - - self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1) - memory_format = torch.channels_last if channels_last else torch.contiguous_format - self.weight = torch.nn.Parameter(torch.randn( - [out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)) - if use_noise: - self.register_buffer( - 'noise_const', torch.randn([resolution, resolution])) - self.noise_strength = torch.nn.Parameter(torch.zeros([])) - self.bias = torch.nn.Parameter(torch.zeros([out_channels])) - - def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1): - assert noise_mode in ['random', 'const', 'none'] - in_resolution = self.resolution // self.up - misc.assert_shape(x, [None, self.in_channels, - in_resolution, in_resolution]) - styles = self.affine(w) - - noise = None - if self.use_noise and noise_mode == 'random': - noise = torch.randn([x.shape[0], 1, self.resolution, - self.resolution], device=x.device) * self.noise_strength - if self.use_noise and noise_mode == 'const': - noise = self.noise_const * self.noise_strength - - flip_weight = (self.up == 1) # slightly faster - x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up, - padding=self.padding, resample_filter=self.resample_filter, flip_weight=flip_weight, fused_modconv=fused_modconv) - - act_gain = self.act_gain * gain - act_clamp = self.conv_clamp * gain if self.conv_clamp is not None else None - x = bias_act.bias_act(x, self.bias.to( - x.dtype), act=self.activation, gain=act_gain, clamp=act_clamp) - return x - - def extra_repr(self): - return ' '.join([ - f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},', - f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}']) - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class ToRGBLayer(torch.nn.Module): - def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.w_dim = w_dim - self.conv_clamp = conv_clamp - self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1) - memory_format = torch.channels_last if channels_last else torch.contiguous_format - self.weight = torch.nn.Parameter(torch.randn( - [out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=memory_format)) - self.bias = torch.nn.Parameter(torch.zeros([out_channels])) - self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2)) - - def forward(self, x, w, fused_modconv=True): - styles = self.affine(w) * self.weight_gain - x = modulated_conv2d(x=x, weight=self.weight, styles=styles, - demodulate=False, fused_modconv=fused_modconv) - x = bias_act.bias_act(x, self.bias.to(x.dtype), clamp=self.conv_clamp) - return x - - def extra_repr(self): - return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}' - -# ---------------------------------------------------------------------------- - - 
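For reference, the style modulation and weight demodulation that `modulated_conv2d` above performs (and that both `SynthesisLayer` and `ToRGBLayer` rely on) reduces to a few tensor operations. The sketch below restates just that step on its own, with illustrative shapes; it omits the grouped convolution, noise addition, and FP16 pre-normalization handled by the full function:

import torch

def modulate_demodulate(weight, styles, eps=1e-8):
    # weight: [out_channels, in_channels, kh, kw], styles: [batch, in_channels]
    w = weight.unsqueeze(0) * styles[:, None, :, None, None]  # per-sample modulation -> [N, O, I, kh, kw]
    dcoefs = (w.square().sum(dim=[2, 3, 4]) + eps).rsqrt()    # per-sample demodulation coefficients -> [N, O]
    return w * dcoefs[:, :, None, None, None]                 # demodulated per-sample weights

w = torch.randn(8, 4, 3, 3)  # out_channels=8, in_channels=4, 3x3 kernel
s = torch.randn(2, 4)        # batch of 2 style vectors
print(modulate_demodulate(w, s).shape)  # torch.Size([2, 8, 4, 3, 3])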
-@persistence.persistent_class -class SynthesisBlock(torch.nn.Module): - def __init__(self, - # Number of input channels, 0 = first block. - in_channels, - # Number of output channels. - out_channels, - # Intermediate latent (W) dimensionality. - w_dim, - # Resolution of this block. - resolution, - # Number of output color channels. - img_channels, - is_last, # Is this the last block? - # Architecture: 'orig', 'skip', 'resnet'. - architecture='skip', - # Low-pass filter to apply when resampling activations. - resample_filter=[1, 3, 3, 1], - # Clamp the output of convolution layers to +-X, None = disable clamping. - conv_clamp=256, - use_fp16=False, # Use FP16 for this block? - fp16_channels_last=False, # Use channels-last memory format with FP16? - # Default value of fused_modconv. 'inference_only' = True for inference, False for training. - fused_modconv_default=True, - # Arguments for SynthesisLayer. - **layer_kwargs, - ): - assert architecture in ['orig', 'skip', 'resnet'] - super().__init__() - self.in_channels = in_channels - self.w_dim = w_dim - self.resolution = resolution - self.img_channels = img_channels - self.is_last = is_last - self.architecture = architecture - self.use_fp16 = use_fp16 - self.channels_last = (use_fp16 and fp16_channels_last) - self.fused_modconv_default = fused_modconv_default - self.register_buffer( - 'resample_filter', upfirdn2d.setup_filter(resample_filter)) - self.num_conv = 0 - self.num_torgb = 0 - - if in_channels == 0: - self.const = torch.nn.Parameter(torch.randn( - [out_channels, resolution, resolution])) - - if in_channels != 0: - self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2, - resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs) - self.num_conv += 1 - - self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution, - conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs) - self.num_conv += 1 - - if is_last or architecture == 'skip': - self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim, - conv_clamp=conv_clamp, channels_last=self.channels_last) - self.num_torgb += 1 - - if in_channels != 0 and architecture == 'resnet': - self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2, - resample_filter=resample_filter, channels_last=self.channels_last) - - def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs): - _ = update_emas # unused - misc.assert_shape( - ws, [None, self.num_conv + self.num_torgb, self.w_dim]) - w_iter = iter(ws.unbind(dim=1)) - if ws.device.type != 'cuda': - force_fp32 = True - dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 - memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format - if fused_modconv is None: - fused_modconv = self.fused_modconv_default - if fused_modconv == 'inference_only': - fused_modconv = (not self.training) - - # Input. - if self.in_channels == 0: - x = self.const.to(dtype=dtype, memory_format=memory_format) - x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1]) - else: - misc.assert_shape(x, [None, self.in_channels, - self.resolution // 2, self.resolution // 2]) - x = x.to(dtype=dtype, memory_format=memory_format) - - # Main layers. 
- if self.in_channels == 0: - x = self.conv1(x, next(w_iter), - fused_modconv=fused_modconv, **layer_kwargs) - elif self.architecture == 'resnet': - y = self.skip(x, gain=np.sqrt(0.5)) - x = self.conv0(x, next(w_iter), - fused_modconv=fused_modconv, **layer_kwargs) - x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, - gain=np.sqrt(0.5), **layer_kwargs) - x = y.add_(x) - else: - x = self.conv0(x, next(w_iter), - fused_modconv=fused_modconv, **layer_kwargs) - x = self.conv1(x, next(w_iter), - fused_modconv=fused_modconv, **layer_kwargs) - - # ToRGB. - if img is not None: - misc.assert_shape( - img, [None, self.img_channels, self.resolution // 2, self.resolution // 2]) - img = upfirdn2d.upsample2d(img, self.resample_filter) - if self.is_last or self.architecture == 'skip': - y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv) - y = y.to(dtype=torch.float32, - memory_format=torch.contiguous_format) - img = img.add_(y) if img is not None else y - - assert x.dtype == dtype - assert img is None or img.dtype == torch.float32 - return x, img - - def extra_repr(self): - return f'resolution={self.resolution:d}, architecture={self.architecture:s}' - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class SynthesisNetwork(torch.nn.Module): - def __init__(self, - # Intermediate latent (W) dimensionality. - w_dim, - img_resolution, # Output image resolution. - img_channels, # Number of color channels. - # Overall multiplier for the number of channels. - channel_base=32768, - # Maximum number of channels in any layer. - channel_max=512, - # Use FP16 for the N highest resolutions. - num_fp16_res=4, - **block_kwargs, # Arguments for SynthesisBlock. - ): - assert img_resolution >= 4 and img_resolution & ( - img_resolution - 1) == 0 - super().__init__() - self.w_dim = w_dim - self.img_resolution = img_resolution - self.img_resolution_log2 = int(np.log2(img_resolution)) - self.img_channels = img_channels - self.num_fp16_res = num_fp16_res - self.block_resolutions = [ - 2 ** i for i in range(2, self.img_resolution_log2 + 1)] - channels_dict = {res: min(channel_base // res, channel_max) - for res in self.block_resolutions} - fp16_resolution = max( - 2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) - - self.num_ws = 0 - for res in self.block_resolutions: - in_channels = channels_dict[res // 2] if res > 4 else 0 - out_channels = channels_dict[res] - use_fp16 = (res >= fp16_resolution) - is_last = (res == self.img_resolution) - block = SynthesisBlock(in_channels, out_channels, w_dim=w_dim, resolution=res, - img_channels=img_channels, is_last=is_last, use_fp16=use_fp16, **block_kwargs) - self.num_ws += block.num_conv - if is_last: - self.num_ws += block.num_torgb - setattr(self, f'b{res}', block) - - def forward(self, ws, return_feature=False, **block_kwargs): - block_ws = [] - features = [] - with torch.autograd.profiler.record_function('split_ws'): - misc.assert_shape(ws, [None, self.num_ws, self.w_dim]) - ws = ws.to(torch.float32) - w_idx = 0 - for res in self.block_resolutions: - block = getattr(self, f'b{res}') - block_ws.append( - ws.narrow(1, w_idx, block.num_conv + block.num_torgb)) - w_idx += block.num_conv - - x = img = None - for res, cur_ws in zip(self.block_resolutions, block_ws): - block = getattr(self, f'b{res}') - x, img = block(x, img, cur_ws, **block_kwargs) - features.append(x) - if return_feature: - return img, features - else: - return img - - def extra_repr(self): - return ' '.join([ - 
f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},', - f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},', - f'num_fp16_res={self.num_fp16_res:d}']) - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class Generator(torch.nn.Module): - def __init__(self, - z_dim, # Input latent (Z) dimensionality. - # Conditioning label (C) dimensionality. - c_dim, - # Intermediate latent (W) dimensionality. - w_dim, - img_resolution, # Output resolution. - img_channels, # Number of output color channels. - mapping_kwargs={}, # Arguments for MappingNetwork. - synthesis_kwargs={}, # Arguments for SynthesisNetwork. - resize=None, - **synthesis_kwargs2, # Arguments for SynthesisNetwork. - ): - super().__init__() - self.z_dim = z_dim - self.c_dim = c_dim - self.w_dim = w_dim - self.img_resolution = img_resolution - self.img_channels = img_channels - if len(synthesis_kwargs) == 0: - synthesis_kwargs = synthesis_kwargs2 - self.synthesis = SynthesisNetwork( - w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs) - self.num_ws = self.synthesis.num_ws - self.mapping = MappingNetwork( - z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs) - self.resize = resize - - def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, input_is_w=False, return_feature=False, **synthesis_kwargs): - if input_is_w: - ws = z - if ws.dim() == 2: - ws = ws.unsqueeze(1).repeat([1, self.mapping.num_ws, 1]) - else: - ws = self.mapping(z, c, truncation_psi=truncation_psi, - truncation_cutoff=truncation_cutoff, update_emas=update_emas) - img = self.synthesis(ws, update_emas=update_emas, - return_feature=return_feature, **synthesis_kwargs) - if return_feature: - img, feature = img - if self.resize is not None: - img = imresize(img, [self.resize, self.resize]) - if return_feature: - return img, feature - else: - return img - - -def imresize(image, size): - dim = image.dim() - if dim == 3: - image = image.unsqueeze(1) - b, _, h, w = image.shape - if size[0] > h: - image = F.interpolate(image, size, mode='bilinear') - elif size[0] < h: - image = F.interpolate(image, size, mode='area') - if dim == 3: - image = image.squeeze(1) - return image - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class DiscriminatorBlock(torch.nn.Module): - def __init__(self, - # Number of input channels, 0 = first block. - in_channels, - # Number of intermediate channels. - tmp_channels, - # Number of output channels. - out_channels, - # Resolution of this block. - resolution, - # Number of input color channels. - img_channels, - # Index of the first layer. - first_layer_idx, - # Architecture: 'orig', 'skip', 'resnet'. - architecture='resnet', - # Activation function: 'relu', 'lrelu', etc. - activation='lrelu', - # Low-pass filter to apply when resampling activations. - resample_filter=[1, 3, 3, 1], - # Clamp the output of convolution layers to +-X, None = disable clamping. - conv_clamp=None, - use_fp16=False, # Use FP16 for this block? - fp16_channels_last=False, # Use channels-last memory format with FP16? - # Freeze-D: Number of layers to freeze. 
- freeze_layers=0, - ): - assert in_channels in [0, tmp_channels] - assert architecture in ['orig', 'skip', 'resnet'] - super().__init__() - self.in_channels = in_channels - self.resolution = resolution - self.img_channels = img_channels - self.first_layer_idx = first_layer_idx - self.architecture = architecture - self.use_fp16 = use_fp16 - self.channels_last = (use_fp16 and fp16_channels_last) - self.register_buffer( - 'resample_filter', upfirdn2d.setup_filter(resample_filter)) - - self.num_layers = 0 - - def trainable_gen(): - while True: - layer_idx = self.first_layer_idx + self.num_layers - trainable = (layer_idx >= freeze_layers) - self.num_layers += 1 - yield trainable - trainable_iter = trainable_gen() - - if in_channels == 0 or architecture == 'skip': - self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation, - trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last) - - self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation, - trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last) - - self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2, - trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last) - - if architecture == 'resnet': - self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2, - trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last) - - def forward(self, x, img, force_fp32=False): - if (x if x is not None else img).device.type != 'cuda': - force_fp32 = True - dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32 - memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format - - # Input. - if x is not None: - misc.assert_shape(x, [None, self.in_channels, - self.resolution, self.resolution]) - x = x.to(dtype=dtype, memory_format=memory_format) - - # FromRGB. - if self.in_channels == 0 or self.architecture == 'skip': - misc.assert_shape( - img, [None, self.img_channels, self.resolution, self.resolution]) - img = img.to(dtype=dtype, memory_format=memory_format) - y = self.fromrgb(img) - x = x + y if x is not None else y - img = upfirdn2d.downsample2d( - img, self.resample_filter) if self.architecture == 'skip' else None - - # Main layers. - if self.architecture == 'resnet': - y = self.skip(x, gain=np.sqrt(0.5)) - x = self.conv0(x) - x = self.conv1(x, gain=np.sqrt(0.5)) - x = y.add_(x) - else: - x = self.conv0(x) - x = self.conv1(x) - - assert x.dtype == dtype - return x, img - - def extra_repr(self): - return f'resolution={self.resolution:d}, architecture={self.architecture:s}' - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class MinibatchStdLayer(torch.nn.Module): - def __init__(self, group_size, num_channels=1): - super().__init__() - self.group_size = group_size - self.num_channels = num_channels - - def forward(self, x): - N, C, H, W = x.shape - with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants - G = torch.min(torch.as_tensor(self.group_size), torch.as_tensor( - N)) if self.group_size is not None else N - F = self.num_channels - c = C // F - - # [GnFcHW] Split minibatch N into n groups of size G, and channels C into F groups of size c. 
- y = x.reshape(G, -1, F, c, H, W) - # [GnFcHW] Subtract mean over group. - y = y - y.mean(dim=0) - # [nFcHW] Calc variance over group. - y = y.square().mean(dim=0) - y = (y + 1e-8).sqrt() # [nFcHW] Calc stddev over group. - # [nF] Take average over channels and pixels. - y = y.mean(dim=[2, 3, 4]) - y = y.reshape(-1, F, 1, 1) # [nF11] Add missing dimensions. - # [NFHW] Replicate over group and pixels. - y = y.repeat(G, 1, H, W) - # [NCHW] Append to input as new channels. - x = torch.cat([x, y], dim=1) - return x - - def extra_repr(self): - return f'group_size={self.group_size}, num_channels={self.num_channels:d}' - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class DiscriminatorEpilogue(torch.nn.Module): - def __init__(self, - in_channels, # Number of input channels. - # Dimensionality of mapped conditioning label, 0 = no label. - cmap_dim, - resolution, # Resolution of this block. - # Number of input color channels. - img_channels, - # Architecture: 'orig', 'skip', 'resnet'. - architecture='resnet', - # Group size for the minibatch standard deviation layer, None = entire minibatch. - mbstd_group_size=4, - # Number of features for the minibatch standard deviation layer, 0 = disable. - mbstd_num_channels=1, - # Activation function: 'relu', 'lrelu', etc. - activation='lrelu', - # Clamp the output of convolution layers to +-X, None = disable clamping. - conv_clamp=None, - ): - assert architecture in ['orig', 'skip', 'resnet'] - super().__init__() - self.in_channels = in_channels - self.cmap_dim = cmap_dim - self.resolution = resolution - self.img_channels = img_channels - self.architecture = architecture - - if architecture == 'skip': - self.fromrgb = Conv2dLayer( - img_channels, in_channels, kernel_size=1, activation=activation) - self.mbstd = MinibatchStdLayer( - group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None - self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels, - kernel_size=3, activation=activation, conv_clamp=conv_clamp) - self.fc = FullyConnectedLayer( - in_channels * (resolution ** 2), in_channels, activation=activation) - self.out = FullyConnectedLayer( - in_channels, 1 if cmap_dim == 0 else cmap_dim) - - def forward(self, x, img, cmap, force_fp32=False): - misc.assert_shape(x, [None, self.in_channels, - self.resolution, self.resolution]) # [NCHW] - _ = force_fp32 # unused - dtype = torch.float32 - memory_format = torch.contiguous_format - - # FromRGB. - x = x.to(dtype=dtype, memory_format=memory_format) - if self.architecture == 'skip': - misc.assert_shape( - img, [None, self.img_channels, self.resolution, self.resolution]) - img = img.to(dtype=dtype, memory_format=memory_format) - x = x + self.fromrgb(img) - - # Main layers. - if self.mbstd is not None: - x = self.mbstd(x) - x = self.conv(x) - x = self.fc(x.flatten(1)) - x = self.out(x) - - # Conditioning. - if self.cmap_dim > 0: - misc.assert_shape(cmap, [None, self.cmap_dim]) - x = (x * cmap).sum(dim=1, keepdim=True) * \ - (1 / np.sqrt(self.cmap_dim)) - - assert x.dtype == dtype - return x - - def extra_repr(self): - return f'resolution={self.resolution:d}, architecture={self.architecture:s}' - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class Discriminator(torch.nn.Module): - def __init__(self, - # Conditioning label (C) dimensionality. - c_dim, - img_resolution, # Input resolution. - # Number of input color channels. 
- img_channels, - # Architecture: 'orig', 'skip', 'resnet'. - architecture='resnet', - # Overall multiplier for the number of channels. - channel_base=32768, - # Maximum number of channels in any layer. - channel_max=512, - # Use FP16 for the N highest resolutions. - num_fp16_res=4, - # Clamp the output of convolution layers to +-X, None = disable clamping. - conv_clamp=256, - # Dimensionality of mapped conditioning label, None = default. - cmap_dim=None, - block_kwargs={}, # Arguments for DiscriminatorBlock. - mapping_kwargs={}, # Arguments for MappingNetwork. - # Arguments for DiscriminatorEpilogue. - epilogue_kwargs={}, - ): - super().__init__() - self.c_dim = c_dim - self.img_resolution = img_resolution - self.img_resolution_log2 = int(np.log2(img_resolution)) - self.img_channels = img_channels - self.block_resolutions = [ - 2 ** i for i in range(self.img_resolution_log2, 2, -1)] - channels_dict = {res: min(channel_base // res, channel_max) - for res in self.block_resolutions + [4]} - fp16_resolution = max( - 2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8) - - if cmap_dim is None: - cmap_dim = channels_dict[4] - if c_dim == 0: - cmap_dim = 0 - - common_kwargs = dict(img_channels=img_channels, - architecture=architecture, conv_clamp=conv_clamp) - cur_layer_idx = 0 - for res in self.block_resolutions: - in_channels = channels_dict[res] if res < img_resolution else 0 - tmp_channels = channels_dict[res] - out_channels = channels_dict[res // 2] - use_fp16 = (res >= fp16_resolution) - block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res, - first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs) - setattr(self, f'b{res}', block) - cur_layer_idx += block.num_layers - if c_dim > 0: - self.mapping = MappingNetwork( - z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs) - self.b4 = DiscriminatorEpilogue( - channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs) - - def forward(self, img, c, update_emas=False, **block_kwargs): - _ = update_emas # unused - x = None - for res in self.block_resolutions: - block = getattr(self, f'b{res}') - x, img = block(x, img, **block_kwargs) - - cmap = None - if self.c_dim > 0: - cmap = self.mapping(None, c) - x = self.b4(x, img, cmap) - return x - - def extra_repr(self): - return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}' - -# ---------------------------------------------------------------------------- diff --git a/spaces/Amrrs/DragGan-Inversion/training/networks_stylegan3.py b/spaces/Amrrs/DragGan-Inversion/training/networks_stylegan3.py deleted file mode 100644 index 338fd287110a02d76c0b7b03fbf041c340f5adb9..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/training/networks_stylegan3.py +++ /dev/null @@ -1,645 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
- -"""Generator architecture from the paper -"Alias-Free Generative Adversarial Networks".""" - -import numpy as np -import scipy.signal -import scipy.optimize -import torch -import torch.nn.functional as F -from torch_utils import misc -from torch_utils import persistence -from torch_utils.ops import conv2d_gradfix -from torch_utils.ops import filtered_lrelu -from torch_utils.ops import bias_act - -# ---------------------------------------------------------------------------- - - -@misc.profiled_function -def modulated_conv2d( - # Input tensor: [batch_size, in_channels, in_height, in_width] - x, - # Weight tensor: [out_channels, in_channels, kernel_height, kernel_width] - w, - s, # Style tensor: [batch_size, in_channels] - demodulate=True, # Apply weight demodulation? - padding=0, # Padding: int or [padH, padW] - input_gain=None, # Optional scale factors for the input channels: [], [in_channels], or [batch_size, in_channels] -): - with misc.suppress_tracer_warnings(): # this value will be treated as a constant - batch_size = int(x.shape[0]) - out_channels, in_channels, kh, kw = w.shape - misc.assert_shape(w, [out_channels, in_channels, kh, kw]) # [OIkk] - misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW] - misc.assert_shape(s, [batch_size, in_channels]) # [NI] - - # Pre-normalize inputs. - if demodulate: - w = w * w.square().mean([1, 2, 3], keepdim=True).rsqrt() - s = s * s.square().mean().rsqrt() - - # Modulate weights. - w = w.unsqueeze(0) # [NOIkk] - w = w * s.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk] - - # Demodulate weights. - if demodulate: - dcoefs = (w.square().sum(dim=[2, 3, 4]) + 1e-8).rsqrt() # [NO] - w = w * dcoefs.unsqueeze(2).unsqueeze(3).unsqueeze(4) # [NOIkk] - - # Apply input scaling. - if input_gain is not None: - input_gain = input_gain.expand(batch_size, in_channels) # [NI] - w = w * input_gain.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk] - - # Execute as one fused op using grouped convolution. - x = x.reshape(1, -1, *x.shape[2:]) - w = w.reshape(-1, in_channels, kh, kw) - x = conv2d_gradfix.conv2d(input=x, weight=w.to( - x.dtype), padding=padding, groups=batch_size) - x = x.reshape(batch_size, -1, *x.shape[2:]) - return x - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class FullyConnectedLayer(torch.nn.Module): - def __init__(self, - in_features, # Number of input features. - out_features, # Number of output features. - # Activation function: 'relu', 'lrelu', etc. - activation='linear', - bias=True, # Apply additive bias before the activation function? - lr_multiplier=1, # Learning rate multiplier. - # Initial standard deviation of the weight tensor. - weight_init=1, - bias_init=0, # Initial value of the additive bias. 
- ): - super().__init__() - self.in_features = in_features - self.out_features = out_features - self.activation = activation - self.weight = torch.nn.Parameter(torch.randn( - [out_features, in_features]) * (weight_init / lr_multiplier)) - bias_init = np.broadcast_to(np.asarray( - bias_init, dtype=np.float32), [out_features]) - self.bias = torch.nn.Parameter(torch.from_numpy( - bias_init / lr_multiplier)) if bias else None - self.weight_gain = lr_multiplier / np.sqrt(in_features) - self.bias_gain = lr_multiplier - - def forward(self, x): - w = self.weight.to(x.dtype) * self.weight_gain - b = self.bias - if b is not None: - b = b.to(x.dtype) - if self.bias_gain != 1: - b = b * self.bias_gain - if self.activation == 'linear' and b is not None: - x = torch.addmm(b.unsqueeze(0), x, w.t()) - else: - x = x.matmul(w.t()) - x = bias_act.bias_act(x, b, act=self.activation) - return x - - def extra_repr(self): - return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}' - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class MappingNetwork(torch.nn.Module): - def __init__(self, - z_dim, # Input latent (Z) dimensionality. - # Conditioning label (C) dimensionality, 0 = no labels. - c_dim, - # Intermediate latent (W) dimensionality. - w_dim, - # Number of intermediate latents to output. - num_ws, - num_layers=2, # Number of mapping layers. - # Learning rate multiplier for the mapping layers. - lr_multiplier=0.01, - # Decay for tracking the moving average of W during training. - w_avg_beta=0.998, - ): - super().__init__() - self.z_dim = z_dim - self.c_dim = c_dim - self.w_dim = w_dim - self.num_ws = num_ws - self.num_layers = num_layers - self.w_avg_beta = w_avg_beta - - # Construct layers. - self.embed = FullyConnectedLayer( - self.c_dim, self.w_dim) if self.c_dim > 0 else None - features = [self.z_dim + (self.w_dim if self.c_dim > - 0 else 0)] + [self.w_dim] * self.num_layers - for idx, in_features, out_features in zip(range(num_layers), features[:-1], features[1:]): - layer = FullyConnectedLayer( - in_features, out_features, activation='lrelu', lr_multiplier=lr_multiplier) - setattr(self, f'fc{idx}', layer) - self.register_buffer('w_avg', torch.zeros([w_dim])) - - def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False): - misc.assert_shape(z, [None, self.z_dim]) - if truncation_cutoff is None: - truncation_cutoff = self.num_ws - - # Embed, normalize, and concatenate inputs. - x = z.to(torch.float32) - x = x * (x.square().mean(1, keepdim=True) + 1e-8).rsqrt() - if self.c_dim > 0: - misc.assert_shape(c, [None, self.c_dim]) - y = self.embed(c.to(torch.float32)) - y = y * (y.square().mean(1, keepdim=True) + 1e-8).rsqrt() - x = torch.cat([x, y], dim=1) if x is not None else y - - # Execute layers. - for idx in range(self.num_layers): - x = getattr(self, f'fc{idx}')(x) - - # Update moving average of W. - if update_emas: - self.w_avg.copy_(x.detach().mean( - dim=0).lerp(self.w_avg, self.w_avg_beta)) - - # Broadcast and apply truncation. 
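# --- Editor's note: illustrative sketch, not part of the original file. ---
# The truncation trick applied just below, in isolation: each of the first
# `truncation_cutoff` intermediate latents is pulled toward the tracked average
# w_avg by lerp, so psi=1 is a no-op and psi=0 collapses them onto w_avg.
# All shapes and values here are made up for the example.
import torch

w_dim, num_ws = 512, 16
w_avg = torch.zeros(w_dim)                     # tracked moving average of W
x = torch.randn(4, w_dim)                      # mapped latents, one per sample
truncation_psi, truncation_cutoff = 0.7, 8

ws = x.unsqueeze(1).repeat(1, num_ws, 1)       # broadcast to [N, num_ws, w_dim]
ws[:, :truncation_cutoff] = w_avg.lerp(ws[:, :truncation_cutoff], truncation_psi)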
- x = x.unsqueeze(1).repeat([1, self.num_ws, 1]) - if truncation_psi != 1: - x[:, :truncation_cutoff] = self.w_avg.lerp( - x[:, :truncation_cutoff], truncation_psi) - return x - - def extra_repr(self): - return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}' - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class SynthesisInput(torch.nn.Module): - def __init__(self, - w_dim, # Intermediate latent (W) dimensionality. - channels, # Number of output channels. - size, # Output spatial size: int or [width, height]. - sampling_rate, # Output sampling rate. - bandwidth, # Output bandwidth. - ): - super().__init__() - self.w_dim = w_dim - self.channels = channels - self.size = np.broadcast_to(np.asarray(size), [2]) - self.sampling_rate = sampling_rate - self.bandwidth = bandwidth - - # Draw random frequencies from uniform 2D disc. - freqs = torch.randn([self.channels, 2]) - radii = freqs.square().sum(dim=1, keepdim=True).sqrt() - freqs /= radii * radii.square().exp().pow(0.25) - freqs *= bandwidth - phases = torch.rand([self.channels]) - 0.5 - - # Setup parameters and buffers. - self.weight = torch.nn.Parameter( - torch.randn([self.channels, self.channels])) - self.affine = FullyConnectedLayer( - w_dim, 4, weight_init=0, bias_init=[1, 0, 0, 0]) - # User-specified inverse transform wrt. resulting image. - self.register_buffer('transform', torch.eye(3, 3)) - self.register_buffer('freqs', freqs) - self.register_buffer('phases', phases) - - def forward(self, w): - # Introduce batch dimension. - transforms = self.transform.unsqueeze(0) # [batch, row, col] - freqs = self.freqs.unsqueeze(0) # [batch, channel, xy] - phases = self.phases.unsqueeze(0) # [batch, channel] - - # Apply learned transformation. - t = self.affine(w) # t = (r_c, r_s, t_x, t_y) - # t' = (r'_c, r'_s, t'_x, t'_y) - t = t / t[:, :2].norm(dim=1, keepdim=True) - # Inverse rotation wrt. resulting image. - m_r = torch.eye(3, device=w.device).unsqueeze( - 0).repeat([w.shape[0], 1, 1]) - m_r[:, 0, 0] = t[:, 0] # r'_c - m_r[:, 0, 1] = -t[:, 1] # r'_s - m_r[:, 1, 0] = t[:, 1] # r'_s - m_r[:, 1, 1] = t[:, 0] # r'_c - # Inverse translation wrt. resulting image. - m_t = torch.eye(3, device=w.device).unsqueeze( - 0).repeat([w.shape[0], 1, 1]) - m_t[:, 0, 2] = -t[:, 2] # t'_x - m_t[:, 1, 2] = -t[:, 3] # t'_y - # First rotate resulting image, then translate, and finally apply user-specified transform. - transforms = m_r @ m_t @ transforms - - # Transform frequencies. - phases = phases + (freqs @ transforms[:, :2, 2:]).squeeze(2) - freqs = freqs @ transforms[:, :2, :2] - - # Dampen out-of-band frequencies that may occur due to the user-specified transform. - amplitudes = (1 - (freqs.norm(dim=2) - self.bandwidth) / - (self.sampling_rate / 2 - self.bandwidth)).clamp(0, 1) - - # Construct sampling grid. - theta = torch.eye(2, 3, device=w.device) - theta[0, 0] = 0.5 * self.size[0] / self.sampling_rate - theta[1, 1] = 0.5 * self.size[1] / self.sampling_rate - grids = torch.nn.functional.affine_grid(theta.unsqueeze( - 0), [1, 1, self.size[1], self.size[0]], align_corners=False) - - # Compute Fourier features. - x = (grids.unsqueeze(3) @ freqs.permute(0, 2, 1).unsqueeze(1).unsqueeze(2) - ).squeeze(3) # [batch, height, width, channel] - x = x + phases.unsqueeze(1).unsqueeze(2) - x = torch.sin(x * (np.pi * 2)) - x = x * amplitudes.unsqueeze(1).unsqueeze(2) - - # Apply trainable mapping. 
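# --- Editor's note: illustrative sketch, not part of the original file. ---
# The division by sqrt(channels) below is runtime weight scaling (the
# "equalized learning rate" convention): the parameter is stored at unit
# variance and rescaled by 1/sqrt(fan_in) on every forward pass, which keeps
# the layer's output close to unit variance. Sizes here are arbitrary.
import numpy as np
import torch

channels = 512
w = torch.randn(channels, channels)            # stored at unit variance
feats = torch.randn(1000, channels)            # stands in for the Fourier features
out = feats @ (w / np.sqrt(channels)).t()
print(float(out.std()))                        # ~1.0 instead of ~sqrt(512) without the scale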
- weight = self.weight / np.sqrt(self.channels) - x = x @ weight.t() - - # Ensure correct shape. - x = x.permute(0, 3, 1, 2) # [batch, channel, height, width] - misc.assert_shape(x, [w.shape[0], self.channels, - int(self.size[1]), int(self.size[0])]) - return x - - def extra_repr(self): - return '\n'.join([ - f'w_dim={self.w_dim:d}, channels={self.channels:d}, size={list(self.size)},', - f'sampling_rate={self.sampling_rate:g}, bandwidth={self.bandwidth:g}']) - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class SynthesisLayer(torch.nn.Module): - def __init__(self, - # Intermediate latent (W) dimensionality. - w_dim, - is_torgb, # Is this the final ToRGB layer? - is_critically_sampled, # Does this layer use critical sampling? - use_fp16, # Does this layer use FP16? - - # Input & output specifications. - in_channels, # Number of input channels. - out_channels, # Number of output channels. - # Input spatial size: int or [width, height]. - in_size, - # Output spatial size: int or [width, height]. - out_size, - in_sampling_rate, # Input sampling rate (s). - out_sampling_rate, # Output sampling rate (s). - # Input cutoff frequency (f_c). - in_cutoff, - # Output cutoff frequency (f_c). - out_cutoff, - # Input transition band half-width (f_h). - in_half_width, - # Output Transition band half-width (f_h). - out_half_width, - - # Hyperparameters. - # Convolution kernel size. Ignored for final the ToRGB layer. - conv_kernel=3, - # Low-pass filter size relative to the lower resolution when up/downsampling. - filter_size=6, - # Relative sampling rate for leaky ReLU. Ignored for final the ToRGB layer. - lrelu_upsampling=2, - # Use radially symmetric downsampling filter? Ignored for critically sampled layers. - use_radial_filters=False, - # Clamp the output to [-X, +X], None = disable clamping. - conv_clamp=256, - # Decay rate for the moving average of input magnitudes. - magnitude_ema_beta=0.999, - ): - super().__init__() - self.w_dim = w_dim - self.is_torgb = is_torgb - self.is_critically_sampled = is_critically_sampled - self.use_fp16 = use_fp16 - self.in_channels = in_channels - self.out_channels = out_channels - self.in_size = np.broadcast_to(np.asarray(in_size), [2]) - self.out_size = np.broadcast_to(np.asarray(out_size), [2]) - self.in_sampling_rate = in_sampling_rate - self.out_sampling_rate = out_sampling_rate - self.tmp_sampling_rate = max( - in_sampling_rate, out_sampling_rate) * (1 if is_torgb else lrelu_upsampling) - self.in_cutoff = in_cutoff - self.out_cutoff = out_cutoff - self.in_half_width = in_half_width - self.out_half_width = out_half_width - self.conv_kernel = 1 if is_torgb else conv_kernel - self.conv_clamp = conv_clamp - self.magnitude_ema_beta = magnitude_ema_beta - - # Setup parameters and buffers. - self.affine = FullyConnectedLayer( - self.w_dim, self.in_channels, bias_init=1) - self.weight = torch.nn.Parameter(torch.randn( - [self.out_channels, self.in_channels, self.conv_kernel, self.conv_kernel])) - self.bias = torch.nn.Parameter(torch.zeros([self.out_channels])) - self.register_buffer('magnitude_ema', torch.ones([])) - - # Design upsampling filter. 
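# --- Editor's note: illustrative sketch, not part of the original file. ---
# What design_lowpass_filter() (defined further down in this class) returns for
# the non-radial case that the buffer below is built from: a Kaiser-windowed
# low-pass FIR whose cutoff and transition width are expressed in the same
# units as the layer's temporary sampling rate. The numbers are hypothetical,
# not taken from a real layer configuration.
import scipy.signal
import torch

numtaps, cutoff, half_width, fs = 12, 8.0, 4.0, 32.0
taps = scipy.signal.firwin(numtaps=numtaps, cutoff=cutoff, width=half_width * 2, fs=fs)
up_filter = torch.as_tensor(taps, dtype=torch.float32)   # registered as a buffer by the layer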
- self.up_factor = int( - np.rint(self.tmp_sampling_rate / self.in_sampling_rate)) - assert self.in_sampling_rate * self.up_factor == self.tmp_sampling_rate - self.up_taps = filter_size * \ - self.up_factor if self.up_factor > 1 and not self.is_torgb else 1 - self.register_buffer('up_filter', self.design_lowpass_filter( - numtaps=self.up_taps, cutoff=self.in_cutoff, width=self.in_half_width*2, fs=self.tmp_sampling_rate)) - - # Design downsampling filter. - self.down_factor = int( - np.rint(self.tmp_sampling_rate / self.out_sampling_rate)) - assert self.out_sampling_rate * self.down_factor == self.tmp_sampling_rate - self.down_taps = filter_size * \ - self.down_factor if self.down_factor > 1 and not self.is_torgb else 1 - self.down_radial = use_radial_filters and not self.is_critically_sampled - self.register_buffer('down_filter', self.design_lowpass_filter( - numtaps=self.down_taps, cutoff=self.out_cutoff, width=self.out_half_width*2, fs=self.tmp_sampling_rate, radial=self.down_radial)) - - # Compute padding. - # Desired output size before downsampling. - pad_total = (self.out_size - 1) * self.down_factor + 1 - # Input size after upsampling. - pad_total -= (self.in_size + self.conv_kernel - 1) * self.up_factor - # Size reduction caused by the filters. - pad_total += self.up_taps + self.down_taps - 2 - # Shift sample locations according to the symmetric interpretation (Appendix C.3). - pad_lo = (pad_total + self.up_factor) // 2 - pad_hi = pad_total - pad_lo - self.padding = [int(pad_lo[0]), int(pad_hi[0]), - int(pad_lo[1]), int(pad_hi[1])] - - def forward(self, x, w, noise_mode='random', force_fp32=False, update_emas=False): - assert noise_mode in ['random', 'const', 'none'] # unused - misc.assert_shape(x, [None, self.in_channels, int( - self.in_size[1]), int(self.in_size[0])]) - misc.assert_shape(w, [x.shape[0], self.w_dim]) - - # Track input magnitude. - if update_emas: - with torch.autograd.profiler.record_function('update_magnitude_ema'): - magnitude_cur = x.detach().to(torch.float32).square().mean() - self.magnitude_ema.copy_(magnitude_cur.lerp( - self.magnitude_ema, self.magnitude_ema_beta)) - input_gain = self.magnitude_ema.rsqrt() - - # Execute affine layer. - styles = self.affine(w) - if self.is_torgb: - weight_gain = 1 / \ - np.sqrt(self.in_channels * (self.conv_kernel ** 2)) - styles = styles * weight_gain - - # Execute modulated conv2d. - dtype = torch.float16 if ( - self.use_fp16 and not force_fp32 and x.device.type == 'cuda') else torch.float32 - x = modulated_conv2d(x=x.to(dtype), w=self.weight, s=styles, - padding=self.conv_kernel-1, demodulate=(not self.is_torgb), input_gain=input_gain) - - # Execute bias, filtered leaky ReLU, and clamping. - gain = 1 if self.is_torgb else np.sqrt(2) - slope = 1 if self.is_torgb else 0.2 - x = filtered_lrelu.filtered_lrelu(x=x, fu=self.up_filter, fd=self.down_filter, b=self.bias.to(x.dtype), - up=self.up_factor, down=self.down_factor, padding=self.padding, gain=gain, slope=slope, clamp=self.conv_clamp) - - # Ensure correct shape and dtype. - misc.assert_shape(x, [None, self.out_channels, int( - self.out_size[1]), int(self.out_size[0])]) - assert x.dtype == dtype - return x - - @staticmethod - def design_lowpass_filter(numtaps, cutoff, width, fs, radial=False): - assert numtaps >= 1 - - # Identity filter. - if numtaps == 1: - return None - - # Separable Kaiser low-pass filter. 
- if not radial: - f = scipy.signal.firwin( - numtaps=numtaps, cutoff=cutoff, width=width, fs=fs) - return torch.as_tensor(f, dtype=torch.float32) - - # Radially symmetric jinc-based filter. - x = (np.arange(numtaps) - (numtaps - 1) / 2) / fs - r = np.hypot(*np.meshgrid(x, x)) - f = scipy.special.j1(2 * cutoff * (np.pi * r)) / (np.pi * r) - beta = scipy.signal.kaiser_beta( - scipy.signal.kaiser_atten(numtaps, width / (fs / 2))) - w = np.kaiser(numtaps, beta) - f *= np.outer(w, w) - f /= np.sum(f) - return torch.as_tensor(f, dtype=torch.float32) - - def extra_repr(self): - return '\n'.join([ - f'w_dim={self.w_dim:d}, is_torgb={self.is_torgb},', - f'is_critically_sampled={self.is_critically_sampled}, use_fp16={self.use_fp16},', - f'in_sampling_rate={self.in_sampling_rate:g}, out_sampling_rate={self.out_sampling_rate:g},', - f'in_cutoff={self.in_cutoff:g}, out_cutoff={self.out_cutoff:g},', - f'in_half_width={self.in_half_width:g}, out_half_width={self.out_half_width:g},', - f'in_size={list(self.in_size)}, out_size={list(self.out_size)},', - f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}']) - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class SynthesisNetwork(torch.nn.Module): - def __init__(self, - # Intermediate latent (W) dimensionality. - w_dim, - img_resolution, # Output image resolution. - img_channels, # Number of color channels. - # Overall multiplier for the number of channels. - channel_base=32768, - # Maximum number of channels in any layer. - channel_max=512, - # Total number of layers, excluding Fourier features and ToRGB. - num_layers=14, - # Number of critically sampled layers at the end. - num_critical=2, - # Cutoff frequency of the first layer (f_{c,0}). - first_cutoff=2, - # Minimum stopband of the first layer (f_{t,0}). - first_stopband=2**2.1, - # Minimum stopband of the last layer, expressed relative to the cutoff. - last_stopband_rel=2**0.3, - # Number of additional pixels outside the image. - margin_size=10, - output_scale=0.25, # Scale factor for the output image. - # Use FP16 for the N highest resolutions. - num_fp16_res=4, - # Arguments for SynthesisLayer. - **layer_kwargs, - ): - super().__init__() - self.w_dim = w_dim - self.num_ws = num_layers + 2 - self.img_resolution = img_resolution - self.img_channels = img_channels - self.num_layers = num_layers - self.num_critical = num_critical - self.margin_size = margin_size - self.output_scale = output_scale - self.num_fp16_res = num_fp16_res - - # Geometric progression of layer cutoffs and min. stopbands. - last_cutoff = self.img_resolution / 2 # f_{c,N} - last_stopband = last_cutoff * last_stopband_rel # f_{t,N} - exponents = np.minimum( - np.arange(self.num_layers + 1) / (self.num_layers - self.num_critical), 1) - cutoffs = first_cutoff * \ - (last_cutoff / first_cutoff) ** exponents # f_c[i] - stopbands = first_stopband * \ - (last_stopband / first_stopband) ** exponents # f_t[i] - - # Compute remaining layer parameters. - sampling_rates = np.exp2( - np.ceil(np.log2(np.minimum(stopbands * 2, self.img_resolution)))) # s[i] - half_widths = np.maximum( - stopbands, sampling_rates / 2) - cutoffs # f_h[i] - sizes = sampling_rates + self.margin_size * 2 - sizes[-2:] = self.img_resolution - channels = np.rint(np.minimum( - (channel_base / 2) / cutoffs, channel_max)) - channels[-1] = self.img_channels - - # Construct layers. 
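# --- Editor's note: illustrative sketch, not part of the original file. ---
# The frequency schedule computed above, evaluated standalone for the default
# hyperparameters and an assumed img_resolution of 256, to make the geometric
# progression concrete: cutoffs grow from first_cutoff=2 up to
# img_resolution/2=128, and sampling rates snap to powers of two.
import numpy as np

num_layers, num_critical = 14, 2
first_cutoff, first_stopband, last_stopband_rel = 2, 2 ** 2.1, 2 ** 0.3
img_resolution = 256

last_cutoff = img_resolution / 2
last_stopband = last_cutoff * last_stopband_rel
exponents = np.minimum(np.arange(num_layers + 1) / (num_layers - num_critical), 1)
cutoffs = first_cutoff * (last_cutoff / first_cutoff) ** exponents
stopbands = first_stopband * (last_stopband / first_stopband) ** exponents
sampling_rates = np.exp2(np.ceil(np.log2(np.minimum(stopbands * 2, img_resolution))))
# cutoffs[0] == 2.0, cutoffs[-1] == 128.0; sampling_rates runs through
# powers of two from 16 up to 256, with the top layers critically sampled.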
- self.input = SynthesisInput( - w_dim=self.w_dim, channels=int(channels[0]), size=int(sizes[0]), - sampling_rate=sampling_rates[0], bandwidth=cutoffs[0]) - self.layer_names = [] - for idx in range(self.num_layers + 1): - prev = max(idx - 1, 0) - is_torgb = (idx == self.num_layers) - is_critically_sampled = ( - idx >= self.num_layers - self.num_critical) - use_fp16 = (sampling_rates[idx] * (2 ** - self.num_fp16_res) > self.img_resolution) - layer = SynthesisLayer( - w_dim=self.w_dim, is_torgb=is_torgb, is_critically_sampled=is_critically_sampled, use_fp16=use_fp16, - in_channels=int(channels[prev]), out_channels=int(channels[idx]), - in_size=int(sizes[prev]), out_size=int(sizes[idx]), - in_sampling_rate=int(sampling_rates[prev]), out_sampling_rate=int(sampling_rates[idx]), - in_cutoff=cutoffs[prev], out_cutoff=cutoffs[idx], - in_half_width=half_widths[prev], out_half_width=half_widths[idx], - **layer_kwargs) - name = f'L{idx}_{layer.out_size[0]}_{layer.out_channels}' - setattr(self, name, layer) - self.layer_names.append(name) - - def forward(self, ws, return_feature=False, **layer_kwargs): - features = [] - misc.assert_shape(ws, [None, self.num_ws, self.w_dim]) - ws = ws.to(torch.float32).unbind(dim=1) - - # Execute layers. - x = self.input(ws[0]) - for name, w in zip(self.layer_names, ws[1:]): - x = getattr(self, name)(x, w, **layer_kwargs) - features.append(x) - if self.output_scale != 1: - x = x * self.output_scale - - # Ensure correct shape and dtype. - misc.assert_shape(x, [None, self.img_channels, - self.img_resolution, self.img_resolution]) - x = x.to(torch.float32) - if return_feature: - return x, features - else: - return x - - def extra_repr(self): - return '\n'.join([ - f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},', - f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},', - f'num_layers={self.num_layers:d}, num_critical={self.num_critical:d},', - f'margin_size={self.margin_size:d}, num_fp16_res={self.num_fp16_res:d}']) - -# ---------------------------------------------------------------------------- - - -@persistence.persistent_class -class Generator(torch.nn.Module): - def __init__(self, - z_dim, # Input latent (Z) dimensionality. - # Conditioning label (C) dimensionality. - c_dim, - # Intermediate latent (W) dimensionality. - w_dim, - img_resolution, # Output resolution. - img_channels, # Number of output color channels. - mapping_kwargs={}, # Arguments for MappingNetwork. - resize=None, - **synthesis_kwargs, # Arguments for SynthesisNetwork. 
- ): - super().__init__() - self.z_dim = z_dim - self.c_dim = c_dim - self.w_dim = w_dim - self.img_resolution = img_resolution - self.img_channels = img_channels - self.synthesis = SynthesisNetwork( - w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs) - self.num_ws = self.synthesis.num_ws - self.mapping = MappingNetwork( - z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs) - self.resize = resize - - def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, input_is_w=False, return_feature=False, **synthesis_kwargs): - if input_is_w: - ws = z - if ws.dim() == 2: - ws = ws.unsqueeze(1).repeat([1, self.mapping.num_ws, 1]) - else: - ws = self.mapping(z, c, truncation_psi=truncation_psi, - truncation_cutoff=truncation_cutoff, update_emas=update_emas) - img = self.synthesis(ws, update_emas=update_emas, - return_feature=return_feature, **synthesis_kwargs) - if return_feature: - img, feature = img - if self.resize is not None: - img = imresize(img, [self.resize, self.resize]) - if return_feature: - return img, feature - else: - return img - -# ---------------------------------------------------------------------------- - - -def imresize(image, size): - dim = image.dim() - if dim == 3: - image = image.unsqueeze(1) - b, _, h, w = image.shape - if size[0] > h: - image = F.interpolate(image, size, mode='bilinear') - elif size[0] < h: - image = F.interpolate(image, size, mode='area') - if dim == 3: - image = image.squeeze(1) - return image diff --git a/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/model/abstract.py b/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/model/abstract.py deleted file mode 100644 index 00c84cc3dbd4a13d67d24aab15775c221a66059c..0000000000000000000000000000000000000000 --- a/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/model/abstract.py +++ /dev/null @@ -1,120 +0,0 @@ -import abc -import numpy as np -import pytorch_lightning as pl -from pytorch_lightning.utilities.types import ( - EVAL_DATALOADERS, - TRAIN_DATALOADERS, -) -from torch import nn -from torch.utils.data import DataLoader -import torch -from torchvision import transforms - -from src.dataset import DATASET_REGISTRY - - -class AbstractModel(pl.LightningModule): - def __init__(self, cfg): - super().__init__() - self.cfg = cfg - self.train_dataset = None - self.val_dataset = None - self.metric_evaluator = None - self.init_model() - - def setup(self, stage): - if stage in ["fit", "validate", "test"]: - self.train_dataset = DATASET_REGISTRY.get("BlenderDataset")( - **self.cfg["dataset"]["train"]["params"], - ) - - self.val_dataset = DATASET_REGISTRY.get("BlenderDataset")( - **self.cfg["dataset"]["val"]["params"], - ) - # self.metric_evaluator = SHRECMetricEvaluator( - # embed_dim=self.cfg["model"]["embed_dim"] - # ) - @abc.abstractmethod - def init_model(self): - """ - Function to initialize model - """ - raise NotImplementedError - - @abc.abstractmethod - def forward(self, batch): - raise NotImplementedError - - @abc.abstractmethod - def compute_loss(self, forwarded_batch, input_batch): - """ - Function to compute loss - Args: - forwarded_batch: output of `forward` method - input_batch: input of batch method - - Returns: - loss: computed loss - """ - raise NotImplementedError - - def training_step(self, batch, batch_idx): - # 1. get embeddings from model - forwarded_batch = self.forward(batch) - # 2. Calculate loss - loss = self.compute_loss(forwarded_batch=forwarded_batch, input_batch=batch) - # 3. 
Update monitor - self.log("train_loss", loss, on_step=True, on_epoch=True, prog_bar=True) - return {"loss": loss} - - def validation_step(self, batch, batch_idx): - # 1. Get embeddings from model - forwarded_batch = self.forward(batch) - # 2. Calculate loss - loss = self.compute_loss(forwarded_batch=forwarded_batch, input_batch=batch) - # 3. Update metric for each batch - self.log("val_loss", loss, on_step=True, on_epoch=True, prog_bar=True) - self.metric_evaluator.append( - g_emb=forwarded_batch["pc_embedding_feats"].float().clone().detach(), - q_emb=forwarded_batch["query_embedding_feats"].float().clone().detach(), - query_ids=batch["query_ids"], - gallery_ids=batch["point_cloud_ids"], - target_ids=batch["point_cloud_ids"], - ) - - return {"loss": loss} - - def validation_epoch_end(self, outputs) -> None: - """ - Callback at validation epoch end to do additional works - with output of validation step, note that this is called - before `training_epoch_end()` - Args: - outputs: output of validation step - """ - self.log_dict( - self.metric_evaluator.evaluate(), - prog_bar=True, - on_step=False, - on_epoch=True, - ) - self.metric_evaluator.reset() - - def train_dataloader(self) -> TRAIN_DATALOADERS: - train_loader = DataLoader( - dataset=self.train_dataset, - collate_fn=self.train_dataset.collate_fn, - **self.cfg["data_loader"]["train"]["params"], - ) - return train_loader - - def val_dataloader(self) -> EVAL_DATALOADERS: - val_loader = DataLoader( - dataset=self.val_dataset, - collate_fn=self.val_dataset.collate_fn, - **self.cfg["data_loader"]["val"]["params"], - ) - return val_loader - - def configure_optimizers(self): - pass \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-pytorch-cuda/Dockerfile b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-pytorch-cuda/Dockerfile deleted file mode 100644 index fab3b70827653a959434cb24929f86e3bd8890e2..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-pytorch-cuda/Dockerfile +++ /dev/null @@ -1,47 +0,0 @@ -FROM nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu20.04 -LABEL maintainer="Hugging Face" -LABEL repository="diffusers" - -ENV DEBIAN_FRONTEND=noninteractive - -RUN apt update && \ - apt install -y bash \ - build-essential \ - git \ - git-lfs \ - curl \ - ca-certificates \ - libsndfile1-dev \ - libgl1 \ - python3.8 \ - python3-pip \ - python3.8-venv && \ - rm -rf /var/lib/apt/lists - -# make sure to use venv -RUN python3 -m venv /opt/venv -ENV PATH="/opt/venv/bin:$PATH" - -# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py) -RUN python3 -m pip install --no-cache-dir --upgrade pip && \ - python3 -m pip install --no-cache-dir \ - torch \ - torchvision \ - torchaudio \ - invisible_watermark && \ - python3 -m pip install --no-cache-dir \ - accelerate \ - datasets \ - hf-doc-builder \ - huggingface-hub \ - Jinja2 \ - librosa \ - numpy \ - scipy \ - tensorboard \ - transformers \ - omegaconf \ - pytorch-lightning \ - xformers - -CMD ["/bin/bash"] diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py deleted file mode 100644 index 07604d7c082f7e7b3c89487461af81fb9650efc7..0000000000000000000000000000000000000000 --- 
a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion/convert_from_ckpt.py +++ /dev/null @@ -1,1645 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Conversion script for the Stable Diffusion checkpoints.""" - -import re -from contextlib import nullcontext -from io import BytesIO -from typing import Optional - -import requests -import torch -from transformers import ( - AutoFeatureExtractor, - BertTokenizerFast, - CLIPImageProcessor, - CLIPTextConfig, - CLIPTextModel, - CLIPTextModelWithProjection, - CLIPTokenizer, - CLIPVisionConfig, - CLIPVisionModelWithProjection, -) - -from ...models import ( - AutoencoderKL, - ControlNetModel, - PriorTransformer, - UNet2DConditionModel, -) -from ...schedulers import ( - DDIMScheduler, - DDPMScheduler, - DPMSolverMultistepScheduler, - EulerAncestralDiscreteScheduler, - EulerDiscreteScheduler, - HeunDiscreteScheduler, - LMSDiscreteScheduler, - PNDMScheduler, - UnCLIPScheduler, -) -from ...utils import is_accelerate_available, is_omegaconf_available, is_safetensors_available, logging -from ...utils.import_utils import BACKENDS_MAPPING -from ..latent_diffusion.pipeline_latent_diffusion import LDMBertConfig, LDMBertModel -from ..paint_by_example import PaintByExampleImageEncoder -from ..pipeline_utils import DiffusionPipeline -from .safety_checker import StableDiffusionSafetyChecker -from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer - - -if is_accelerate_available(): - from accelerate import init_empty_weights - from accelerate.utils import set_module_tensor_to_device - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -def shave_segments(path, n_shave_prefix_segments=1): - """ - Removes segments. Positive values shave the first segments, negative shave the last segments. 
- """ - if n_shave_prefix_segments >= 0: - return ".".join(path.split(".")[n_shave_prefix_segments:]) - else: - return ".".join(path.split(".")[:n_shave_prefix_segments]) - - -def renew_resnet_paths(old_list, n_shave_prefix_segments=0): - """ - Updates paths inside resnets to the new naming scheme (local renaming) - """ - mapping = [] - for old_item in old_list: - new_item = old_item.replace("in_layers.0", "norm1") - new_item = new_item.replace("in_layers.2", "conv1") - - new_item = new_item.replace("out_layers.0", "norm2") - new_item = new_item.replace("out_layers.3", "conv2") - - new_item = new_item.replace("emb_layers.1", "time_emb_proj") - new_item = new_item.replace("skip_connection", "conv_shortcut") - - new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) - - mapping.append({"old": old_item, "new": new_item}) - - return mapping - - -def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0): - """ - Updates paths inside resnets to the new naming scheme (local renaming) - """ - mapping = [] - for old_item in old_list: - new_item = old_item - - new_item = new_item.replace("nin_shortcut", "conv_shortcut") - new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) - - mapping.append({"old": old_item, "new": new_item}) - - return mapping - - -def renew_attention_paths(old_list, n_shave_prefix_segments=0): - """ - Updates paths inside attentions to the new naming scheme (local renaming) - """ - mapping = [] - for old_item in old_list: - new_item = old_item - - # new_item = new_item.replace('norm.weight', 'group_norm.weight') - # new_item = new_item.replace('norm.bias', 'group_norm.bias') - - # new_item = new_item.replace('proj_out.weight', 'proj_attn.weight') - # new_item = new_item.replace('proj_out.bias', 'proj_attn.bias') - - # new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) - - mapping.append({"old": old_item, "new": new_item}) - - return mapping - - -def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0): - """ - Updates paths inside attentions to the new naming scheme (local renaming) - """ - mapping = [] - for old_item in old_list: - new_item = old_item - - new_item = new_item.replace("norm.weight", "group_norm.weight") - new_item = new_item.replace("norm.bias", "group_norm.bias") - - new_item = new_item.replace("q.weight", "to_q.weight") - new_item = new_item.replace("q.bias", "to_q.bias") - - new_item = new_item.replace("k.weight", "to_k.weight") - new_item = new_item.replace("k.bias", "to_k.bias") - - new_item = new_item.replace("v.weight", "to_v.weight") - new_item = new_item.replace("v.bias", "to_v.bias") - - new_item = new_item.replace("proj_out.weight", "to_out.0.weight") - new_item = new_item.replace("proj_out.bias", "to_out.0.bias") - - new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments) - - mapping.append({"old": old_item, "new": new_item}) - - return mapping - - -def assign_to_checkpoint( - paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None -): - """ - This does the final conversion step: take locally converted weights and apply a global renaming to them. It splits - attention layers, and takes into account additional replacements that may arise. - - Assigns the weights to the new checkpoint. - """ - assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys." - - # Splits the attention layers into three variables. 
- if attention_paths_to_split is not None: - for path, path_map in attention_paths_to_split.items(): - old_tensor = old_checkpoint[path] - channels = old_tensor.shape[0] // 3 - - target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1) - - num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3 - - old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]) - query, key, value = old_tensor.split(channels // num_heads, dim=1) - - checkpoint[path_map["query"]] = query.reshape(target_shape) - checkpoint[path_map["key"]] = key.reshape(target_shape) - checkpoint[path_map["value"]] = value.reshape(target_shape) - - for path in paths: - new_path = path["new"] - - # These have already been assigned - if attention_paths_to_split is not None and new_path in attention_paths_to_split: - continue - - # Global renaming happens here - new_path = new_path.replace("middle_block.0", "mid_block.resnets.0") - new_path = new_path.replace("middle_block.1", "mid_block.attentions.0") - new_path = new_path.replace("middle_block.2", "mid_block.resnets.1") - - if additional_replacements is not None: - for replacement in additional_replacements: - new_path = new_path.replace(replacement["old"], replacement["new"]) - - # proj_attn.weight has to be converted from conv 1D to linear - is_attn_weight = "proj_attn.weight" in new_path or ("attentions" in new_path and "to_" in new_path) - shape = old_checkpoint[path["old"]].shape - if is_attn_weight and len(shape) == 3: - checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0] - elif is_attn_weight and len(shape) == 4: - checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0, 0] - else: - checkpoint[new_path] = old_checkpoint[path["old"]] - - -def conv_attn_to_linear(checkpoint): - keys = list(checkpoint.keys()) - attn_keys = ["query.weight", "key.weight", "value.weight"] - for key in keys: - if ".".join(key.split(".")[-2:]) in attn_keys: - if checkpoint[key].ndim > 2: - checkpoint[key] = checkpoint[key][:, :, 0, 0] - elif "proj_attn.weight" in key: - if checkpoint[key].ndim > 2: - checkpoint[key] = checkpoint[key][:, :, 0] - - -def create_unet_diffusers_config(original_config, image_size: int, controlnet=False): - """ - Creates a config for the diffusers based on the config of the LDM model. 
- """ - if controlnet: - unet_params = original_config.model.params.control_stage_config.params - else: - if "unet_config" in original_config.model.params and original_config.model.params.unet_config is not None: - unet_params = original_config.model.params.unet_config.params - else: - unet_params = original_config.model.params.network_config.params - - vae_params = original_config.model.params.first_stage_config.params.ddconfig - - block_out_channels = [unet_params.model_channels * mult for mult in unet_params.channel_mult] - - down_block_types = [] - resolution = 1 - for i in range(len(block_out_channels)): - block_type = "CrossAttnDownBlock2D" if resolution in unet_params.attention_resolutions else "DownBlock2D" - down_block_types.append(block_type) - if i != len(block_out_channels) - 1: - resolution *= 2 - - up_block_types = [] - for i in range(len(block_out_channels)): - block_type = "CrossAttnUpBlock2D" if resolution in unet_params.attention_resolutions else "UpBlock2D" - up_block_types.append(block_type) - resolution //= 2 - - if unet_params.transformer_depth is not None: - transformer_layers_per_block = ( - unet_params.transformer_depth - if isinstance(unet_params.transformer_depth, int) - else list(unet_params.transformer_depth) - ) - else: - transformer_layers_per_block = 1 - - vae_scale_factor = 2 ** (len(vae_params.ch_mult) - 1) - - head_dim = unet_params.num_heads if "num_heads" in unet_params else None - use_linear_projection = ( - unet_params.use_linear_in_transformer if "use_linear_in_transformer" in unet_params else False - ) - if use_linear_projection: - # stable diffusion 2-base-512 and 2-768 - if head_dim is None: - head_dim_mult = unet_params.model_channels // unet_params.num_head_channels - head_dim = [head_dim_mult * c for c in list(unet_params.channel_mult)] - - class_embed_type = None - addition_embed_type = None - addition_time_embed_dim = None - projection_class_embeddings_input_dim = None - context_dim = None - - if unet_params.context_dim is not None: - context_dim = ( - unet_params.context_dim if isinstance(unet_params.context_dim, int) else unet_params.context_dim[0] - ) - - if "num_classes" in unet_params: - if unet_params.num_classes == "sequential": - if context_dim in [2048, 1280]: - # SDXL - addition_embed_type = "text_time" - addition_time_embed_dim = 256 - else: - class_embed_type = "projection" - assert "adm_in_channels" in unet_params - projection_class_embeddings_input_dim = unet_params.adm_in_channels - else: - raise NotImplementedError(f"Unknown conditional unet num_classes config: {unet_params.num_classes}") - - config = { - "sample_size": image_size // vae_scale_factor, - "in_channels": unet_params.in_channels, - "down_block_types": tuple(down_block_types), - "block_out_channels": tuple(block_out_channels), - "layers_per_block": unet_params.num_res_blocks, - "cross_attention_dim": context_dim, - "attention_head_dim": head_dim, - "use_linear_projection": use_linear_projection, - "class_embed_type": class_embed_type, - "addition_embed_type": addition_embed_type, - "addition_time_embed_dim": addition_time_embed_dim, - "projection_class_embeddings_input_dim": projection_class_embeddings_input_dim, - "transformer_layers_per_block": transformer_layers_per_block, - } - - if controlnet: - config["conditioning_channels"] = unet_params.hint_channels - else: - config["out_channels"] = unet_params.out_channels - config["up_block_types"] = tuple(up_block_types) - - return config - - -def create_vae_diffusers_config(original_config, image_size: int): - """ - 
Creates a config for the diffusers based on the config of the LDM model. - """ - vae_params = original_config.model.params.first_stage_config.params.ddconfig - _ = original_config.model.params.first_stage_config.params.embed_dim - - block_out_channels = [vae_params.ch * mult for mult in vae_params.ch_mult] - down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels) - up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels) - - config = { - "sample_size": image_size, - "in_channels": vae_params.in_channels, - "out_channels": vae_params.out_ch, - "down_block_types": tuple(down_block_types), - "up_block_types": tuple(up_block_types), - "block_out_channels": tuple(block_out_channels), - "latent_channels": vae_params.z_channels, - "layers_per_block": vae_params.num_res_blocks, - } - return config - - -def create_diffusers_schedular(original_config): - schedular = DDIMScheduler( - num_train_timesteps=original_config.model.params.timesteps, - beta_start=original_config.model.params.linear_start, - beta_end=original_config.model.params.linear_end, - beta_schedule="scaled_linear", - ) - return schedular - - -def create_ldm_bert_config(original_config): - bert_params = original_config.model.parms.cond_stage_config.params - config = LDMBertConfig( - d_model=bert_params.n_embed, - encoder_layers=bert_params.n_layer, - encoder_ffn_dim=bert_params.n_embed * 4, - ) - return config - - -def convert_ldm_unet_checkpoint( - checkpoint, config, path=None, extract_ema=False, controlnet=False, skip_extract_state_dict=False -): - """ - Takes a state dict and a config, and returns a converted checkpoint. - """ - - if skip_extract_state_dict: - unet_state_dict = checkpoint - else: - # extract state_dict for UNet - unet_state_dict = {} - keys = list(checkpoint.keys()) - - if controlnet: - unet_key = "control_model." - else: - unet_key = "model.diffusion_model." - - # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA - if sum(k.startswith("model_ema") for k in keys) > 100 and extract_ema: - logger.warning(f"Checkpoint {path} has both EMA and non-EMA weights.") - logger.warning( - "In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA" - " weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag." - ) - for key in keys: - if key.startswith("model.diffusion_model"): - flat_ema_key = "model_ema." + "".join(key.split(".")[1:]) - unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(flat_ema_key) - else: - if sum(k.startswith("model_ema") for k in keys) > 100: - logger.warning( - "In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA" - " weights (usually better for inference), please make sure to add the `--extract_ema` flag." - ) - - for key in keys: - if key.startswith(unet_key): - unet_state_dict[key.replace(unet_key, "")] = checkpoint.pop(key) - - new_checkpoint = {} - - new_checkpoint["time_embedding.linear_1.weight"] = unet_state_dict["time_embed.0.weight"] - new_checkpoint["time_embedding.linear_1.bias"] = unet_state_dict["time_embed.0.bias"] - new_checkpoint["time_embedding.linear_2.weight"] = unet_state_dict["time_embed.2.weight"] - new_checkpoint["time_embedding.linear_2.bias"] = unet_state_dict["time_embed.2.bias"] - - if config["class_embed_type"] is None: - # No parameters to port - ... 
- elif config["class_embed_type"] == "timestep" or config["class_embed_type"] == "projection": - new_checkpoint["class_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"] - new_checkpoint["class_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"] - new_checkpoint["class_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"] - new_checkpoint["class_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"] - else: - raise NotImplementedError(f"Not implemented `class_embed_type`: {config['class_embed_type']}") - - if config["addition_embed_type"] == "text_time": - new_checkpoint["add_embedding.linear_1.weight"] = unet_state_dict["label_emb.0.0.weight"] - new_checkpoint["add_embedding.linear_1.bias"] = unet_state_dict["label_emb.0.0.bias"] - new_checkpoint["add_embedding.linear_2.weight"] = unet_state_dict["label_emb.0.2.weight"] - new_checkpoint["add_embedding.linear_2.bias"] = unet_state_dict["label_emb.0.2.bias"] - - new_checkpoint["conv_in.weight"] = unet_state_dict["input_blocks.0.0.weight"] - new_checkpoint["conv_in.bias"] = unet_state_dict["input_blocks.0.0.bias"] - - if not controlnet: - new_checkpoint["conv_norm_out.weight"] = unet_state_dict["out.0.weight"] - new_checkpoint["conv_norm_out.bias"] = unet_state_dict["out.0.bias"] - new_checkpoint["conv_out.weight"] = unet_state_dict["out.2.weight"] - new_checkpoint["conv_out.bias"] = unet_state_dict["out.2.bias"] - - # Retrieves the keys for the input blocks only - num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "input_blocks" in layer}) - input_blocks = { - layer_id: [key for key in unet_state_dict if f"input_blocks.{layer_id}" in key] - for layer_id in range(num_input_blocks) - } - - # Retrieves the keys for the middle blocks only - num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "middle_block" in layer}) - middle_blocks = { - layer_id: [key for key in unet_state_dict if f"middle_block.{layer_id}" in key] - for layer_id in range(num_middle_blocks) - } - - # Retrieves the keys for the output blocks only - num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in unet_state_dict if "output_blocks" in layer}) - output_blocks = { - layer_id: [key for key in unet_state_dict if f"output_blocks.{layer_id}" in key] - for layer_id in range(num_output_blocks) - } - - for i in range(1, num_input_blocks): - block_id = (i - 1) // (config["layers_per_block"] + 1) - layer_in_block_id = (i - 1) % (config["layers_per_block"] + 1) - - resnets = [ - key for key in input_blocks[i] if f"input_blocks.{i}.0" in key and f"input_blocks.{i}.0.op" not in key - ] - attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] - - if f"input_blocks.{i}.0.op.weight" in unet_state_dict: - new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = unet_state_dict.pop( - f"input_blocks.{i}.0.op.weight" - ) - new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = unet_state_dict.pop( - f"input_blocks.{i}.0.op.bias" - ) - - paths = renew_resnet_paths(resnets) - meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} - assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - - if len(attentions): - paths = renew_attention_paths(attentions) - meta_path = {"old": f"input_blocks.{i}.1", "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}"} - assign_to_checkpoint( - 
paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - - resnet_0 = middle_blocks[0] - attentions = middle_blocks[1] - resnet_1 = middle_blocks[2] - - resnet_0_paths = renew_resnet_paths(resnet_0) - assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config) - - resnet_1_paths = renew_resnet_paths(resnet_1) - assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config) - - attentions_paths = renew_attention_paths(attentions) - meta_path = {"old": "middle_block.1", "new": "mid_block.attentions.0"} - assign_to_checkpoint( - attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - - for i in range(num_output_blocks): - block_id = i // (config["layers_per_block"] + 1) - layer_in_block_id = i % (config["layers_per_block"] + 1) - output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]] - output_block_list = {} - - for layer in output_block_layers: - layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1) - if layer_id in output_block_list: - output_block_list[layer_id].append(layer_name) - else: - output_block_list[layer_id] = [layer_name] - - if len(output_block_list) > 1: - resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] - attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] - - resnet_0_paths = renew_resnet_paths(resnets) - paths = renew_resnet_paths(resnets) - - meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} - assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - - output_block_list = {k: sorted(v) for k, v in output_block_list.items()} - if ["conv.bias", "conv.weight"] in output_block_list.values(): - index = list(output_block_list.values()).index(["conv.bias", "conv.weight"]) - new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = unet_state_dict[ - f"output_blocks.{i}.{index}.conv.weight" - ] - new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = unet_state_dict[ - f"output_blocks.{i}.{index}.conv.bias" - ] - - # Clear attentions as they have been attributed above. 
- if len(attentions) == 2: - attentions = [] - - if len(attentions): - paths = renew_attention_paths(attentions) - meta_path = { - "old": f"output_blocks.{i}.1", - "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}", - } - assign_to_checkpoint( - paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config - ) - else: - resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1) - for path in resnet_0_paths: - old_path = ".".join(["output_blocks", str(i), path["old"]]) - new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]]) - - new_checkpoint[new_path] = unet_state_dict[old_path] - - if controlnet: - # conditioning embedding - - orig_index = 0 - - new_checkpoint["controlnet_cond_embedding.conv_in.weight"] = unet_state_dict.pop( - f"input_hint_block.{orig_index}.weight" - ) - new_checkpoint["controlnet_cond_embedding.conv_in.bias"] = unet_state_dict.pop( - f"input_hint_block.{orig_index}.bias" - ) - - orig_index += 2 - - diffusers_index = 0 - - while diffusers_index < 6: - new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.weight"] = unet_state_dict.pop( - f"input_hint_block.{orig_index}.weight" - ) - new_checkpoint[f"controlnet_cond_embedding.blocks.{diffusers_index}.bias"] = unet_state_dict.pop( - f"input_hint_block.{orig_index}.bias" - ) - diffusers_index += 1 - orig_index += 2 - - new_checkpoint["controlnet_cond_embedding.conv_out.weight"] = unet_state_dict.pop( - f"input_hint_block.{orig_index}.weight" - ) - new_checkpoint["controlnet_cond_embedding.conv_out.bias"] = unet_state_dict.pop( - f"input_hint_block.{orig_index}.bias" - ) - - # down blocks - for i in range(num_input_blocks): - new_checkpoint[f"controlnet_down_blocks.{i}.weight"] = unet_state_dict.pop(f"zero_convs.{i}.0.weight") - new_checkpoint[f"controlnet_down_blocks.{i}.bias"] = unet_state_dict.pop(f"zero_convs.{i}.0.bias") - - # mid block - new_checkpoint["controlnet_mid_block.weight"] = unet_state_dict.pop("middle_block_out.0.weight") - new_checkpoint["controlnet_mid_block.bias"] = unet_state_dict.pop("middle_block_out.0.bias") - - return new_checkpoint - - -def convert_ldm_vae_checkpoint(checkpoint, config): - # extract state dict for VAE - vae_state_dict = {} - keys = list(checkpoint.keys()) - vae_key = "first_stage_model." 
if any(k.startswith("first_stage_model.") for k in keys) else "" - for key in keys: - if key.startswith(vae_key): - vae_state_dict[key.replace(vae_key, "")] = checkpoint.get(key) - - new_checkpoint = {} - - new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"] - new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"] - new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"] - new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"] - new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"] - new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"] - - new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"] - new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"] - new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"] - new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"] - new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"] - new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"] - - new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"] - new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"] - new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"] - new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"] - - # Retrieves the keys for the encoder down blocks only - num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer}) - down_blocks = { - layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks) - } - - # Retrieves the keys for the decoder up blocks only - num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer}) - up_blocks = { - layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks) - } - - for i in range(num_down_blocks): - resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key] - - if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict: - new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop( - f"encoder.down.{i}.downsample.conv.weight" - ) - new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop( - f"encoder.down.{i}.downsample.conv.bias" - ) - - paths = renew_vae_resnet_paths(resnets) - meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"} - assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) - - mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key] - num_mid_res_blocks = 2 - for i in range(1, num_mid_res_blocks + 1): - resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key] - - paths = renew_vae_resnet_paths(resnets) - meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} - assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) - - mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key] - paths = renew_vae_attention_paths(mid_attentions) - meta_path = {"old": "mid.attn_1", "new": 
"mid_block.attentions.0"} - assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) - conv_attn_to_linear(new_checkpoint) - - for i in range(num_up_blocks): - block_id = num_up_blocks - 1 - i - resnets = [ - key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key - ] - - if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict: - new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[ - f"decoder.up.{block_id}.upsample.conv.weight" - ] - new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[ - f"decoder.up.{block_id}.upsample.conv.bias" - ] - - paths = renew_vae_resnet_paths(resnets) - meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"} - assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) - - mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key] - num_mid_res_blocks = 2 - for i in range(1, num_mid_res_blocks + 1): - resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key] - - paths = renew_vae_resnet_paths(resnets) - meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"} - assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) - - mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key] - paths = renew_vae_attention_paths(mid_attentions) - meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} - assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config) - conv_attn_to_linear(new_checkpoint) - return new_checkpoint - - -def convert_ldm_bert_checkpoint(checkpoint, config): - def _copy_attn_layer(hf_attn_layer, pt_attn_layer): - hf_attn_layer.q_proj.weight.data = pt_attn_layer.to_q.weight - hf_attn_layer.k_proj.weight.data = pt_attn_layer.to_k.weight - hf_attn_layer.v_proj.weight.data = pt_attn_layer.to_v.weight - - hf_attn_layer.out_proj.weight = pt_attn_layer.to_out.weight - hf_attn_layer.out_proj.bias = pt_attn_layer.to_out.bias - - def _copy_linear(hf_linear, pt_linear): - hf_linear.weight = pt_linear.weight - hf_linear.bias = pt_linear.bias - - def _copy_layer(hf_layer, pt_layer): - # copy layer norms - _copy_linear(hf_layer.self_attn_layer_norm, pt_layer[0][0]) - _copy_linear(hf_layer.final_layer_norm, pt_layer[1][0]) - - # copy attn - _copy_attn_layer(hf_layer.self_attn, pt_layer[0][1]) - - # copy MLP - pt_mlp = pt_layer[1][1] - _copy_linear(hf_layer.fc1, pt_mlp.net[0][0]) - _copy_linear(hf_layer.fc2, pt_mlp.net[2]) - - def _copy_layers(hf_layers, pt_layers): - for i, hf_layer in enumerate(hf_layers): - if i != 0: - i += i - pt_layer = pt_layers[i : i + 2] - _copy_layer(hf_layer, pt_layer) - - hf_model = LDMBertModel(config).eval() - - # copy embeds - hf_model.model.embed_tokens.weight = checkpoint.transformer.token_emb.weight - hf_model.model.embed_positions.weight.data = checkpoint.transformer.pos_emb.emb.weight - - # copy layer norm - _copy_linear(hf_model.model.layer_norm, checkpoint.transformer.norm) - - # copy hidden layers - _copy_layers(hf_model.model.layers, checkpoint.transformer.attn_layers.layers) - - _copy_linear(hf_model.to_logits, checkpoint.transformer.to_logits) - - return hf_model - - -def convert_ldm_clip_checkpoint(checkpoint, local_files_only=False, text_encoder=None): - if text_encoder is None: - config_name = 
"openai/clip-vit-large-patch14" - config = CLIPTextConfig.from_pretrained(config_name) - - ctx = init_empty_weights if is_accelerate_available() else nullcontext - with ctx(): - text_model = CLIPTextModel(config) - - keys = list(checkpoint.keys()) - - text_model_dict = {} - - remove_prefixes = ["cond_stage_model.transformer", "conditioner.embedders.0.transformer"] - - for key in keys: - for prefix in remove_prefixes: - if key.startswith(prefix): - text_model_dict[key[len(prefix + ".") :]] = checkpoint[key] - - if is_accelerate_available(): - for param_name, param in text_model_dict.items(): - set_module_tensor_to_device(text_model, param_name, "cpu", value=param) - else: - if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings.position_ids)): - text_model_dict.pop("text_model.embeddings.position_ids", None) - - text_model.load_state_dict(text_model_dict) - - return text_model - - -textenc_conversion_lst = [ - ("positional_embedding", "text_model.embeddings.position_embedding.weight"), - ("token_embedding.weight", "text_model.embeddings.token_embedding.weight"), - ("ln_final.weight", "text_model.final_layer_norm.weight"), - ("ln_final.bias", "text_model.final_layer_norm.bias"), - ("text_projection", "text_projection.weight"), -] -textenc_conversion_map = {x[0]: x[1] for x in textenc_conversion_lst} - -textenc_transformer_conversion_lst = [ - # (stable-diffusion, HF Diffusers) - ("resblocks.", "text_model.encoder.layers."), - ("ln_1", "layer_norm1"), - ("ln_2", "layer_norm2"), - (".c_fc.", ".fc1."), - (".c_proj.", ".fc2."), - (".attn", ".self_attn"), - ("ln_final.", "transformer.text_model.final_layer_norm."), - ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"), - ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"), -] -protected = {re.escape(x[0]): x[1] for x in textenc_transformer_conversion_lst} -textenc_pattern = re.compile("|".join(protected.keys())) - - -def convert_paint_by_example_checkpoint(checkpoint): - config = CLIPVisionConfig.from_pretrained("openai/clip-vit-large-patch14") - model = PaintByExampleImageEncoder(config) - - keys = list(checkpoint.keys()) - - text_model_dict = {} - - for key in keys: - if key.startswith("cond_stage_model.transformer"): - text_model_dict[key[len("cond_stage_model.transformer.") :]] = checkpoint[key] - - # load clip vision - model.model.load_state_dict(text_model_dict) - - # load mapper - keys_mapper = { - k[len("cond_stage_model.mapper.res") :]: v - for k, v in checkpoint.items() - if k.startswith("cond_stage_model.mapper") - } - - MAPPING = { - "attn.c_qkv": ["attn1.to_q", "attn1.to_k", "attn1.to_v"], - "attn.c_proj": ["attn1.to_out.0"], - "ln_1": ["norm1"], - "ln_2": ["norm3"], - "mlp.c_fc": ["ff.net.0.proj"], - "mlp.c_proj": ["ff.net.2"], - } - - mapped_weights = {} - for key, value in keys_mapper.items(): - prefix = key[: len("blocks.i")] - suffix = key.split(prefix)[-1].split(".")[-1] - name = key.split(prefix)[-1].split(suffix)[0][1:-1] - mapped_names = MAPPING[name] - - num_splits = len(mapped_names) - for i, mapped_name in enumerate(mapped_names): - new_name = ".".join([prefix, mapped_name, suffix]) - shape = value.shape[0] // num_splits - mapped_weights[new_name] = value[i * shape : (i + 1) * shape] - - model.mapper.load_state_dict(mapped_weights) - - # load final layer norm - model.final_layer_norm.load_state_dict( - { - "bias": checkpoint["cond_stage_model.final_ln.bias"], - "weight": checkpoint["cond_stage_model.final_ln.weight"], - } - ) - - # 
load final proj - model.proj_out.load_state_dict( - { - "bias": checkpoint["proj_out.bias"], - "weight": checkpoint["proj_out.weight"], - } - ) - - # load uncond vector - model.uncond_vector.data = torch.nn.Parameter(checkpoint["learnable_vector"]) - return model - - -def convert_open_clip_checkpoint( - checkpoint, config_name, prefix="cond_stage_model.model.", has_projection=False, **config_kwargs -): - # text_model = CLIPTextModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="text_encoder") - # text_model = CLIPTextModelWithProjection.from_pretrained( - # "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", projection_dim=1280 - # ) - config = CLIPTextConfig.from_pretrained(config_name, **config_kwargs) - - ctx = init_empty_weights if is_accelerate_available() else nullcontext - with ctx(): - text_model = CLIPTextModelWithProjection(config) if has_projection else CLIPTextModel(config) - - keys = list(checkpoint.keys()) - - keys_to_ignore = [] - if config_name == "stabilityai/stable-diffusion-2" and config.num_hidden_layers == 23: - # make sure to remove all keys > 22 - keys_to_ignore += [k for k in keys if k.startswith("cond_stage_model.model.transformer.resblocks.23")] - keys_to_ignore += ["cond_stage_model.model.text_projection"] - - text_model_dict = {} - - if prefix + "text_projection" in checkpoint: - d_model = int(checkpoint[prefix + "text_projection"].shape[0]) - else: - d_model = 1024 - - text_model_dict["text_model.embeddings.position_ids"] = text_model.text_model.embeddings.get_buffer("position_ids") - - for key in keys: - if key in keys_to_ignore: - continue - if key[len(prefix) :] in textenc_conversion_map: - if key.endswith("text_projection"): - value = checkpoint[key].T.contiguous() - else: - value = checkpoint[key] - - text_model_dict[textenc_conversion_map[key[len(prefix) :]]] = value - - if key.startswith(prefix + "transformer."): - new_key = key[len(prefix + "transformer.") :] - if new_key.endswith(".in_proj_weight"): - new_key = new_key[: -len(".in_proj_weight")] - new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) - text_model_dict[new_key + ".q_proj.weight"] = checkpoint[key][:d_model, :] - text_model_dict[new_key + ".k_proj.weight"] = checkpoint[key][d_model : d_model * 2, :] - text_model_dict[new_key + ".v_proj.weight"] = checkpoint[key][d_model * 2 :, :] - elif new_key.endswith(".in_proj_bias"): - new_key = new_key[: -len(".in_proj_bias")] - new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) - text_model_dict[new_key + ".q_proj.bias"] = checkpoint[key][:d_model] - text_model_dict[new_key + ".k_proj.bias"] = checkpoint[key][d_model : d_model * 2] - text_model_dict[new_key + ".v_proj.bias"] = checkpoint[key][d_model * 2 :] - else: - new_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], new_key) - - text_model_dict[new_key] = checkpoint[key] - - if is_accelerate_available(): - for param_name, param in text_model_dict.items(): - set_module_tensor_to_device(text_model, param_name, "cpu", value=param) - else: - if not (hasattr(text_model, "embeddings") and hasattr(text_model.embeddings.position_ids)): - text_model_dict.pop("text_model.embeddings.position_ids", None) - - text_model.load_state_dict(text_model_dict) - - return text_model - - -def stable_unclip_image_encoder(original_config): - """ - Returns the image processor and clip image encoder for the img2img unclip pipeline. 
- - We currently know of two types of stable unclip models which separately use the clip and the openclip image - encoders. - """ - - image_embedder_config = original_config.model.params.embedder_config - - sd_clip_image_embedder_class = image_embedder_config.target - sd_clip_image_embedder_class = sd_clip_image_embedder_class.split(".")[-1] - - if sd_clip_image_embedder_class == "ClipImageEmbedder": - clip_model_name = image_embedder_config.params.model - - if clip_model_name == "ViT-L/14": - feature_extractor = CLIPImageProcessor() - image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14") - else: - raise NotImplementedError(f"Unknown CLIP checkpoint name in stable diffusion checkpoint {clip_model_name}") - - elif sd_clip_image_embedder_class == "FrozenOpenCLIPImageEmbedder": - feature_extractor = CLIPImageProcessor() - image_encoder = CLIPVisionModelWithProjection.from_pretrained("laion/CLIP-ViT-H-14-laion2B-s32B-b79K") - else: - raise NotImplementedError( - f"Unknown CLIP image embedder class in stable diffusion checkpoint {sd_clip_image_embedder_class}" - ) - - return feature_extractor, image_encoder - - -def stable_unclip_image_noising_components( - original_config, clip_stats_path: Optional[str] = None, device: Optional[str] = None -): - """ - Returns the noising components for the img2img and txt2img unclip pipelines. - - Converts the stability noise augmentor into - 1. a `StableUnCLIPImageNormalizer` for holding the CLIP stats - 2. a `DDPMScheduler` for holding the noise schedule - - If the noise augmentor config specifies a clip stats path, the `clip_stats_path` must be provided. - """ - noise_aug_config = original_config.model.params.noise_aug_config - noise_aug_class = noise_aug_config.target - noise_aug_class = noise_aug_class.split(".")[-1] - - if noise_aug_class == "CLIPEmbeddingNoiseAugmentation": - noise_aug_config = noise_aug_config.params - embedding_dim = noise_aug_config.timestep_dim - max_noise_level = noise_aug_config.noise_schedule_config.timesteps - beta_schedule = noise_aug_config.noise_schedule_config.beta_schedule - - image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedding_dim) - image_noising_scheduler = DDPMScheduler(num_train_timesteps=max_noise_level, beta_schedule=beta_schedule) - - if "clip_stats_path" in noise_aug_config: - if clip_stats_path is None: - raise ValueError("This stable unclip config requires a `clip_stats_path`") - - clip_mean, clip_std = torch.load(clip_stats_path, map_location=device) - clip_mean = clip_mean[None, :] - clip_std = clip_std[None, :] - - clip_stats_state_dict = { - "mean": clip_mean, - "std": clip_std, - } - - image_normalizer.load_state_dict(clip_stats_state_dict) - else: - raise NotImplementedError(f"Unknown noise augmentor class: {noise_aug_class}") - - return image_normalizer, image_noising_scheduler - - -def convert_controlnet_checkpoint( - checkpoint, - original_config, - checkpoint_path, - image_size, - upcast_attention, - extract_ema, - use_linear_projection=None, - cross_attention_dim=None, -): - ctrlnet_config = create_unet_diffusers_config(original_config, image_size=image_size, controlnet=True) - ctrlnet_config["upcast_attention"] = upcast_attention - - ctrlnet_config.pop("sample_size") - - if use_linear_projection is not None: - ctrlnet_config["use_linear_projection"] = use_linear_projection - - if cross_attention_dim is not None: - ctrlnet_config["cross_attention_dim"] = cross_attention_dim - - controlnet = ControlNetModel(**ctrlnet_config) - - # Some 
controlnet ckpt files are distributed independently from the rest of the - # model components i.e. https://huggingface.co/thibaud/controlnet-sd21/ - if "time_embed.0.weight" in checkpoint: - skip_extract_state_dict = True - else: - skip_extract_state_dict = False - - converted_ctrl_checkpoint = convert_ldm_unet_checkpoint( - checkpoint, - ctrlnet_config, - path=checkpoint_path, - extract_ema=extract_ema, - controlnet=True, - skip_extract_state_dict=skip_extract_state_dict, - ) - - controlnet.load_state_dict(converted_ctrl_checkpoint) - - return controlnet - - -def download_from_original_stable_diffusion_ckpt( - checkpoint_path: str, - original_config_file: str = None, - image_size: Optional[int] = None, - prediction_type: str = None, - model_type: str = None, - extract_ema: bool = False, - scheduler_type: str = "pndm", - num_in_channels: Optional[int] = None, - upcast_attention: Optional[bool] = None, - device: str = None, - from_safetensors: bool = False, - stable_unclip: Optional[str] = None, - stable_unclip_prior: Optional[str] = None, - clip_stats_path: Optional[str] = None, - controlnet: Optional[bool] = None, - load_safety_checker: bool = True, - pipeline_class: DiffusionPipeline = None, - local_files_only=False, - vae_path=None, - vae=None, - text_encoder=None, - tokenizer=None, -) -> DiffusionPipeline: - """ - Load a Stable Diffusion pipeline object from a CompVis-style `.ckpt`/`.safetensors` file and (ideally) a `.yaml` - config file. - - Although many of the arguments can be automatically inferred, some of these rely on brittle checks against the - global step count, which will likely fail for models that have undergone further fine-tuning. Therefore, it is - recommended that you override the default values and/or supply an `original_config_file` wherever possible. - - Args: - checkpoint_path (`str`): Path to `.ckpt` file. - original_config_file (`str`): - Path to `.yaml` config file corresponding to the original architecture. If `None`, will be automatically - inferred by looking for a key that only exists in SD2.0 models. - image_size (`int`, *optional*, defaults to 512): - The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2 - Base. Use 768 for Stable Diffusion v2. - prediction_type (`str`, *optional*): - The prediction type that the model was trained on. Use `'epsilon'` for Stable Diffusion v1.X and Stable - Diffusion v2 Base. Use `'v_prediction'` for Stable Diffusion v2. - num_in_channels (`int`, *optional*, defaults to None): - The number of input channels. If `None`, it will be automatically inferred. - scheduler_type (`str`, *optional*, defaults to 'pndm'): - Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm", - "ddim"]`. - model_type (`str`, *optional*, defaults to `None`): - The pipeline type. `None` to automatically infer, or one of `["FrozenOpenCLIPEmbedder", - "FrozenCLIPEmbedder", "PaintByExample"]`. - is_img2img (`bool`, *optional*, defaults to `False`): - Whether the model should be loaded as an img2img pipeline. - extract_ema (`bool`, *optional*, defaults to `False`): Only relevant for - checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights or not. Defaults to - `False`. Pass `True` to extract the EMA weights. EMA weights usually yield higher quality images for - inference. Non-EMA weights are usually better to continue fine-tuning. 
- upcast_attention (`bool`, *optional*, defaults to `None`): - Whether the attention computation should always be upcasted. This is necessary when running stable - diffusion 2.1. - device (`str`, *optional*, defaults to `None`): - The device to use. Pass `None` to determine automatically. - from_safetensors (`str`, *optional*, defaults to `False`): - If `checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch. - load_safety_checker (`bool`, *optional*, defaults to `True`): - Whether to load the safety checker or not. Defaults to `True`. - pipeline_class (`str`, *optional*, defaults to `None`): - The pipeline class to use. Pass `None` to determine automatically. - local_files_only (`bool`, *optional*, defaults to `False`): - Whether or not to only look at local files (i.e., do not try to download the model). - vae (`AutoencoderKL`, *optional*, defaults to `None`): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. If - this parameter is `None`, the function will load a new instance of [CLIP] by itself, if needed. - text_encoder (`CLIPTextModel`, *optional*, defaults to `None`): - An instance of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel) - to use, specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) - variant. If this parameter is `None`, the function will load a new instance of [CLIP] by itself, if needed. - tokenizer (`CLIPTokenizer`, *optional*, defaults to `None`): - An instance of - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer) - to use. If this parameter is `None`, the function will load a new instance of [CLIPTokenizer] by itself, if - needed. - return: A StableDiffusionPipeline object representing the passed-in `.ckpt`/`.safetensors` file. 
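Editorial aside, not part of the deleted file: a minimal usage sketch of this converter. The checkpoint and config file names are placeholders, the import path assumes the upstream diffusers layout for this module, and every keyword argument used is one of the parameters documented above.

```python
# Hedged sketch: convert a local SD 1.x checkpoint into a diffusers pipeline.
# File names are hypothetical; import path assumed from the upstream diffusers layout.
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_from_original_stable_diffusion_ckpt,
)

pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path="v1-5-pruned-emaonly.safetensors",  # hypothetical local checkpoint
    original_config_file="v1-inference.yaml",           # recommended: pass the yaml explicitly
    from_safetensors=True,
    extract_ema=True,           # EMA weights usually give better inference quality
    load_safety_checker=False,
)
pipe.save_pretrained("./converted-sd15")                # write out in diffusers folder format
```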
- """ - - # import pipelines here to avoid circular import error when using from_single_file method - from diffusers import ( - LDMTextToImagePipeline, - PaintByExamplePipeline, - StableDiffusionControlNetPipeline, - StableDiffusionInpaintPipeline, - StableDiffusionPipeline, - StableDiffusionXLImg2ImgPipeline, - StableDiffusionXLPipeline, - StableUnCLIPImg2ImgPipeline, - StableUnCLIPPipeline, - ) - - if pipeline_class is None: - pipeline_class = StableDiffusionPipeline if not controlnet else StableDiffusionControlNetPipeline - - if prediction_type == "v-prediction": - prediction_type = "v_prediction" - - if not is_omegaconf_available(): - raise ValueError(BACKENDS_MAPPING["omegaconf"][1]) - - from omegaconf import OmegaConf - - if from_safetensors: - if not is_safetensors_available(): - raise ValueError(BACKENDS_MAPPING["safetensors"][1]) - - from safetensors.torch import load_file as safe_load - - checkpoint = safe_load(checkpoint_path, device="cpu") - else: - if device is None: - device = "cuda" if torch.cuda.is_available() else "cpu" - checkpoint = torch.load(checkpoint_path, map_location=device) - else: - checkpoint = torch.load(checkpoint_path, map_location=device) - - # Sometimes models don't have the global_step item - if "global_step" in checkpoint: - global_step = checkpoint["global_step"] - else: - logger.debug("global_step key not found in model") - global_step = None - - # NOTE: this while loop isn't great but this controlnet checkpoint has one additional - # "state_dict" key https://huggingface.co/thibaud/controlnet-canny-sd21 - while "state_dict" in checkpoint: - checkpoint = checkpoint["state_dict"] - - if original_config_file is None: - key_name_v2_1 = "model.diffusion_model.input_blocks.2.1.transformer_blocks.0.attn2.to_k.weight" - key_name_sd_xl_base = "conditioner.embedders.1.model.transformer.resblocks.9.mlp.c_proj.bias" - key_name_sd_xl_refiner = "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias" - - # model_type = "v1" - config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" - - if key_name_v2_1 in checkpoint and checkpoint[key_name_v2_1].shape[-1] == 1024: - # model_type = "v2" - config_url = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml" - - if global_step == 110000: - # v2.1 needs to upcast attention - upcast_attention = True - elif key_name_sd_xl_base in checkpoint: - # only base xl has two text embedders - config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml" - elif key_name_sd_xl_refiner in checkpoint: - # only refiner xl has embedder and one text embedders - config_url = "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml" - - original_config_file = BytesIO(requests.get(config_url).content) - - original_config = OmegaConf.load(original_config_file) - - # Convert the text model. 
- if ( - model_type is None - and "cond_stage_config" in original_config.model.params - and original_config.model.params.cond_stage_config is not None - ): - model_type = original_config.model.params.cond_stage_config.target.split(".")[-1] - logger.debug(f"no `model_type` given, `model_type` inferred as: {model_type}") - elif model_type is None and original_config.model.params.network_config is not None: - if original_config.model.params.network_config.params.context_dim == 2048: - model_type = "SDXL" - else: - model_type = "SDXL-Refiner" - if image_size is None: - image_size = 1024 - - if num_in_channels is None and pipeline_class == StableDiffusionInpaintPipeline: - num_in_channels = 9 - elif num_in_channels is None: - num_in_channels = 4 - - if "unet_config" in original_config.model.params: - original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels - - if ( - "parameterization" in original_config["model"]["params"] - and original_config["model"]["params"]["parameterization"] == "v" - ): - if prediction_type is None: - # NOTE: For stable diffusion 2 base it is recommended to pass `prediction_type=="epsilon"` - # as it relies on a brittle global step parameter here - prediction_type = "epsilon" if global_step == 875000 else "v_prediction" - if image_size is None: - # NOTE: For stable diffusion 2 base one has to pass `image_size==512` - # as it relies on a brittle global step parameter here - image_size = 512 if global_step == 875000 else 768 - else: - if prediction_type is None: - prediction_type = "epsilon" - if image_size is None: - image_size = 512 - - if controlnet is None and "control_stage_config" in original_config.model.params: - controlnet = convert_controlnet_checkpoint( - checkpoint, original_config, checkpoint_path, image_size, upcast_attention, extract_ema - ) - - num_train_timesteps = getattr(original_config.model.params, "timesteps", None) or 1000 - - if model_type in ["SDXL", "SDXL-Refiner"]: - scheduler_dict = { - "beta_schedule": "scaled_linear", - "beta_start": 0.00085, - "beta_end": 0.012, - "interpolation_type": "linear", - "num_train_timesteps": num_train_timesteps, - "prediction_type": "epsilon", - "sample_max_value": 1.0, - "set_alpha_to_one": False, - "skip_prk_steps": True, - "steps_offset": 1, - "timestep_spacing": "leading", - } - scheduler = EulerDiscreteScheduler.from_config(scheduler_dict) - scheduler_type = "euler" - else: - beta_start = getattr(original_config.model.params, "linear_start", None) or 0.02 - beta_end = getattr(original_config.model.params, "linear_end", None) or 0.085 - scheduler = DDIMScheduler( - beta_end=beta_end, - beta_schedule="scaled_linear", - beta_start=beta_start, - num_train_timesteps=num_train_timesteps, - steps_offset=1, - clip_sample=False, - set_alpha_to_one=False, - prediction_type=prediction_type, - ) - # make sure scheduler works correctly with DDIM - scheduler.register_to_config(clip_sample=False) - - if scheduler_type == "pndm": - config = dict(scheduler.config) - config["skip_prk_steps"] = True - scheduler = PNDMScheduler.from_config(config) - elif scheduler_type == "lms": - scheduler = LMSDiscreteScheduler.from_config(scheduler.config) - elif scheduler_type == "heun": - scheduler = HeunDiscreteScheduler.from_config(scheduler.config) - elif scheduler_type == "euler": - scheduler = EulerDiscreteScheduler.from_config(scheduler.config) - elif scheduler_type == "euler-ancestral": - scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler.config) - elif scheduler_type == "dpm": - 
scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) - elif scheduler_type == "ddim": - scheduler = scheduler - else: - raise ValueError(f"Scheduler of type {scheduler_type} doesn't exist!") - - # Convert the UNet2DConditionModel model. - unet_config = create_unet_diffusers_config(original_config, image_size=image_size) - unet_config["upcast_attention"] = upcast_attention - converted_unet_checkpoint = convert_ldm_unet_checkpoint( - checkpoint, unet_config, path=checkpoint_path, extract_ema=extract_ema - ) - - ctx = init_empty_weights if is_accelerate_available() else nullcontext - with ctx(): - unet = UNet2DConditionModel(**unet_config) - - if is_accelerate_available(): - for param_name, param in converted_unet_checkpoint.items(): - set_module_tensor_to_device(unet, param_name, "cpu", value=param) - else: - unet.load_state_dict(converted_unet_checkpoint) - - # Convert the VAE model. - if vae_path is None and vae is None: - vae_config = create_vae_diffusers_config(original_config, image_size=image_size) - converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config) - - if ( - "model" in original_config - and "params" in original_config.model - and "scale_factor" in original_config.model.params - ): - vae_scaling_factor = original_config.model.params.scale_factor - else: - vae_scaling_factor = 0.18215 # default SD scaling factor - - vae_config["scaling_factor"] = vae_scaling_factor - - ctx = init_empty_weights if is_accelerate_available() else nullcontext - with ctx(): - vae = AutoencoderKL(**vae_config) - - if is_accelerate_available(): - for param_name, param in converted_vae_checkpoint.items(): - set_module_tensor_to_device(vae, param_name, "cpu", value=param) - else: - vae.load_state_dict(converted_vae_checkpoint) - elif vae is None: - vae = AutoencoderKL.from_pretrained(vae_path) - - if model_type == "FrozenOpenCLIPEmbedder": - config_name = "stabilityai/stable-diffusion-2" - config_kwargs = {"subfolder": "text_encoder"} - - text_model = convert_open_clip_checkpoint(checkpoint, config_name, **config_kwargs) - tokenizer = CLIPTokenizer.from_pretrained("stabilityai/stable-diffusion-2", subfolder="tokenizer") - - if stable_unclip is None: - if controlnet: - pipe = pipeline_class( - vae=vae, - text_encoder=text_model, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - controlnet=controlnet, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) - else: - pipe = pipeline_class( - vae=vae, - text_encoder=text_model, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=None, - feature_extractor=None, - requires_safety_checker=False, - ) - else: - image_normalizer, image_noising_scheduler = stable_unclip_image_noising_components( - original_config, clip_stats_path=clip_stats_path, device=device - ) - - if stable_unclip == "img2img": - feature_extractor, image_encoder = stable_unclip_image_encoder(original_config) - - pipe = StableUnCLIPImg2ImgPipeline( - # image encoding components - feature_extractor=feature_extractor, - image_encoder=image_encoder, - # image noising components - image_normalizer=image_normalizer, - image_noising_scheduler=image_noising_scheduler, - # regular denoising components - tokenizer=tokenizer, - text_encoder=text_model, - unet=unet, - scheduler=scheduler, - # vae - vae=vae, - ) - elif stable_unclip == "txt2img": - if stable_unclip_prior is None or stable_unclip_prior == "karlo": - karlo_model = "kakaobrain/karlo-v1-alpha" - prior = 
PriorTransformer.from_pretrained(karlo_model, subfolder="prior") - - prior_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") - prior_text_model = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-large-patch14") - - prior_scheduler = UnCLIPScheduler.from_pretrained(karlo_model, subfolder="prior_scheduler") - prior_scheduler = DDPMScheduler.from_config(prior_scheduler.config) - else: - raise NotImplementedError(f"unknown prior for stable unclip model: {stable_unclip_prior}") - - pipe = StableUnCLIPPipeline( - # prior components - prior_tokenizer=prior_tokenizer, - prior_text_encoder=prior_text_model, - prior=prior, - prior_scheduler=prior_scheduler, - # image noising components - image_normalizer=image_normalizer, - image_noising_scheduler=image_noising_scheduler, - # regular denoising components - tokenizer=tokenizer, - text_encoder=text_model, - unet=unet, - scheduler=scheduler, - # vae - vae=vae, - ) - else: - raise NotImplementedError(f"unknown `stable_unclip` type: {stable_unclip}") - elif model_type == "PaintByExample": - vision_model = convert_paint_by_example_checkpoint(checkpoint) - tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") - feature_extractor = AutoFeatureExtractor.from_pretrained("CompVis/stable-diffusion-safety-checker") - pipe = PaintByExamplePipeline( - vae=vae, - image_encoder=vision_model, - unet=unet, - scheduler=scheduler, - safety_checker=None, - feature_extractor=feature_extractor, - ) - elif model_type == "FrozenCLIPEmbedder": - text_model = convert_ldm_clip_checkpoint( - checkpoint, local_files_only=local_files_only, text_encoder=text_encoder - ) - tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") if tokenizer is None else tokenizer - - if load_safety_checker: - safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker") - feature_extractor = AutoFeatureExtractor.from_pretrained("CompVis/stable-diffusion-safety-checker") - else: - safety_checker = None - feature_extractor = None - - if controlnet: - pipe = pipeline_class( - vae=vae, - text_encoder=text_model, - tokenizer=tokenizer, - unet=unet, - controlnet=controlnet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - else: - pipe = pipeline_class( - vae=vae, - text_encoder=text_model, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - elif model_type in ["SDXL", "SDXL-Refiner"]: - if model_type == "SDXL": - tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14") - text_encoder = convert_ldm_clip_checkpoint(checkpoint, local_files_only=local_files_only) - tokenizer_2 = CLIPTokenizer.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", pad_token="!") - - config_name = "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" - config_kwargs = {"projection_dim": 1280} - text_encoder_2 = convert_open_clip_checkpoint( - checkpoint, config_name, prefix="conditioner.embedders.1.model.", has_projection=True, **config_kwargs - ) - - pipe = StableDiffusionXLPipeline( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - text_encoder_2=text_encoder_2, - tokenizer_2=tokenizer_2, - unet=unet, - scheduler=scheduler, - force_zeros_for_empty_prompt=True, - ) - else: - tokenizer = None - text_encoder = None - tokenizer_2 = CLIPTokenizer.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", pad_token="!") - - config_name = 
"laion/CLIP-ViT-bigG-14-laion2B-39B-b160k" - config_kwargs = {"projection_dim": 1280} - text_encoder_2 = convert_open_clip_checkpoint( - checkpoint, config_name, prefix="conditioner.embedders.0.model.", has_projection=True, **config_kwargs - ) - - pipe = StableDiffusionXLImg2ImgPipeline( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - text_encoder_2=text_encoder_2, - tokenizer_2=tokenizer_2, - unet=unet, - scheduler=scheduler, - requires_aesthetics_score=True, - force_zeros_for_empty_prompt=False, - ) - else: - text_config = create_ldm_bert_config(original_config) - text_model = convert_ldm_bert_checkpoint(checkpoint, text_config) - tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") - pipe = LDMTextToImagePipeline(vqvae=vae, bert=text_model, tokenizer=tokenizer, unet=unet, scheduler=scheduler) - - return pipe - - -def download_controlnet_from_original_ckpt( - checkpoint_path: str, - original_config_file: str, - image_size: int = 512, - extract_ema: bool = False, - num_in_channels: Optional[int] = None, - upcast_attention: Optional[bool] = None, - device: str = None, - from_safetensors: bool = False, - use_linear_projection: Optional[bool] = None, - cross_attention_dim: Optional[bool] = None, -) -> DiffusionPipeline: - if not is_omegaconf_available(): - raise ValueError(BACKENDS_MAPPING["omegaconf"][1]) - - from omegaconf import OmegaConf - - if from_safetensors: - if not is_safetensors_available(): - raise ValueError(BACKENDS_MAPPING["safetensors"][1]) - - from safetensors import safe_open - - checkpoint = {} - with safe_open(checkpoint_path, framework="pt", device="cpu") as f: - for key in f.keys(): - checkpoint[key] = f.get_tensor(key) - else: - if device is None: - device = "cuda" if torch.cuda.is_available() else "cpu" - checkpoint = torch.load(checkpoint_path, map_location=device) - else: - checkpoint = torch.load(checkpoint_path, map_location=device) - - # NOTE: this while loop isn't great but this controlnet checkpoint has one additional - # "state_dict" key https://huggingface.co/thibaud/controlnet-canny-sd21 - while "state_dict" in checkpoint: - checkpoint = checkpoint["state_dict"] - - original_config = OmegaConf.load(original_config_file) - - if num_in_channels is not None: - original_config["model"]["params"]["unet_config"]["params"]["in_channels"] = num_in_channels - - if "control_stage_config" not in original_config.model.params: - raise ValueError("`control_stage_config` not present in original config") - - controlnet = convert_controlnet_checkpoint( - checkpoint, - original_config, - checkpoint_path, - image_size, - upcast_attention, - extract_ema, - use_linear_projection=use_linear_projection, - cross_attention_dim=cross_attention_dim, - ) - - return controlnet diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky.py deleted file mode 100644 index 01b8a0f3eec1117ef7c84c228a9f46763df2140f..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky.py +++ /dev/null @@ -1,317 +0,0 @@ -# coding=utf-8 -# Copyright 2023 HuggingFace Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import gc -import random -import unittest - -import numpy as np -import torch -from transformers import XLMRobertaTokenizerFast - -from diffusers import DDIMScheduler, KandinskyPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel -from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP -from diffusers.utils import floats_tensor, load_numpy, slow, torch_device -from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu - -from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference - - -enable_full_determinism() - - -class Dummies: - @property - def text_embedder_hidden_size(self): - return 32 - - @property - def time_input_dim(self): - return 32 - - @property - def block_out_channels_0(self): - return self.time_input_dim - - @property - def time_embed_dim(self): - return self.time_input_dim * 4 - - @property - def cross_attention_dim(self): - return 32 - - @property - def dummy_tokenizer(self): - tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base") - return tokenizer - - @property - def dummy_text_encoder(self): - torch.manual_seed(0) - config = MCLIPConfig( - numDims=self.cross_attention_dim, - transformerDimensions=self.text_embedder_hidden_size, - hidden_size=self.text_embedder_hidden_size, - intermediate_size=37, - num_attention_heads=4, - num_hidden_layers=5, - vocab_size=1005, - ) - - text_encoder = MultilingualCLIP(config) - text_encoder = text_encoder.eval() - - return text_encoder - - @property - def dummy_unet(self): - torch.manual_seed(0) - - model_kwargs = { - "in_channels": 4, - # Out channels is double in channels because predicts mean and variance - "out_channels": 8, - "addition_embed_type": "text_image", - "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), - "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), - "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", - "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), - "layers_per_block": 1, - "encoder_hid_dim": self.text_embedder_hidden_size, - "encoder_hid_dim_type": "text_image_proj", - "cross_attention_dim": self.cross_attention_dim, - "attention_head_dim": 4, - "resnet_time_scale_shift": "scale_shift", - "class_embed_type": None, - } - - model = UNet2DConditionModel(**model_kwargs) - return model - - @property - def dummy_movq_kwargs(self): - return { - "block_out_channels": [32, 64], - "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], - "in_channels": 3, - "latent_channels": 4, - "layers_per_block": 1, - "norm_num_groups": 8, - "norm_type": "spatial", - "num_vq_embeddings": 12, - "out_channels": 3, - "up_block_types": [ - "AttnUpDecoderBlock2D", - "UpDecoderBlock2D", - ], - "vq_embed_dim": 4, - } - - @property - def dummy_movq(self): - torch.manual_seed(0) - model = VQModel(**self.dummy_movq_kwargs) - return model - - def get_dummy_components(self): - text_encoder = self.dummy_text_encoder - tokenizer = self.dummy_tokenizer - unet = self.dummy_unet - movq = self.dummy_movq - - scheduler = 
DDIMScheduler( - num_train_timesteps=1000, - beta_schedule="linear", - beta_start=0.00085, - beta_end=0.012, - clip_sample=False, - set_alpha_to_one=False, - steps_offset=1, - prediction_type="epsilon", - thresholding=False, - ) - - components = { - "text_encoder": text_encoder, - "tokenizer": tokenizer, - "unet": unet, - "scheduler": scheduler, - "movq": movq, - } - return components - - def get_dummy_inputs(self, device, seed=0): - image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device) - negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device) - if str(device).startswith("mps"): - generator = torch.manual_seed(seed) - else: - generator = torch.Generator(device=device).manual_seed(seed) - inputs = { - "prompt": "horse", - "image_embeds": image_embeds, - "negative_image_embeds": negative_image_embeds, - "generator": generator, - "height": 64, - "width": 64, - "guidance_scale": 4.0, - "num_inference_steps": 2, - "output_type": "np", - } - return inputs - - -class KandinskyPipelineFastTests(PipelineTesterMixin, unittest.TestCase): - pipeline_class = KandinskyPipeline - params = [ - "prompt", - "image_embeds", - "negative_image_embeds", - ] - batch_params = ["prompt", "negative_prompt", "image_embeds", "negative_image_embeds"] - required_optional_params = [ - "generator", - "height", - "width", - "latents", - "guidance_scale", - "negative_prompt", - "num_inference_steps", - "return_dict", - "guidance_scale", - "num_images_per_prompt", - "output_type", - "return_dict", - ] - test_xformers_attention = False - - def get_dummy_components(self): - dummy = Dummies() - return dummy.get_dummy_components() - - def get_dummy_inputs(self, device, seed=0): - dummy = Dummies() - return dummy.get_dummy_inputs(device=device, seed=seed) - - def test_kandinsky(self): - device = "cpu" - - components = self.get_dummy_components() - - pipe = self.pipeline_class(**components) - pipe = pipe.to(device) - - pipe.set_progress_bar_config(disable=None) - - output = pipe(**self.get_dummy_inputs(device)) - image = output.images - - image_from_tuple = pipe( - **self.get_dummy_inputs(device), - return_dict=False, - )[0] - - image_slice = image[0, -3:, -3:, -1] - image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] - - assert image.shape == (1, 64, 64, 3) - - expected_slice = np.array([1.0000, 1.0000, 0.2766, 1.0000, 0.5447, 0.1737, 1.0000, 0.4316, 0.9024]) - - assert ( - np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" - assert ( - np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 - ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" - - @require_torch_gpu - def test_offloads(self): - pipes = [] - components = self.get_dummy_components() - sd_pipe = self.pipeline_class(**components).to(torch_device) - pipes.append(sd_pipe) - - components = self.get_dummy_components() - sd_pipe = self.pipeline_class(**components) - sd_pipe.enable_model_cpu_offload() - pipes.append(sd_pipe) - - components = self.get_dummy_components() - sd_pipe = self.pipeline_class(**components) - sd_pipe.enable_sequential_cpu_offload() - pipes.append(sd_pipe) - - image_slices = [] - for pipe in pipes: - inputs = self.get_dummy_inputs(torch_device) - image = pipe(**inputs).images - - image_slices.append(image[0, -3:, -3:, -1].flatten()) - - assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 - assert 
np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 - - -@slow -@require_torch_gpu -class KandinskyPipelineIntegrationTests(unittest.TestCase): - def tearDown(self): - # clean up the VRAM after each test - super().tearDown() - gc.collect() - torch.cuda.empty_cache() - - def test_kandinsky_text2img(self): - expected_image = load_numpy( - "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" - "/kandinsky/kandinsky_text2img_cat_fp16.npy" - ) - - pipe_prior = KandinskyPriorPipeline.from_pretrained( - "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 - ) - pipe_prior.to(torch_device) - - pipeline = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16) - pipeline = pipeline.to(torch_device) - pipeline.set_progress_bar_config(disable=None) - - prompt = "red cat, 4k photo" - - generator = torch.Generator(device="cuda").manual_seed(0) - image_emb, zero_image_emb = pipe_prior( - prompt, - generator=generator, - num_inference_steps=5, - negative_prompt="", - ).to_tuple() - - generator = torch.Generator(device="cuda").manual_seed(0) - output = pipeline( - prompt, - image_embeds=image_emb, - negative_image_embeds=zero_image_emb, - generator=generator, - num_inference_steps=100, - output_type="np", - ) - - image = output.images[0] - - assert image.shape == (512, 512, 3) - - assert_mean_pixel_difference(image, expected_image) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py deleted file mode 100644 index 61b9751057f10f2173b8e7edde12cca53ebbd2d0..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/ghm/retinanet_ghm_r50_fpn_1x_coco.py +++ /dev/null @@ -1,19 +0,0 @@ -_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py' -model = dict( - bbox_head=dict( - loss_cls=dict( - _delete_=True, - type='GHMC', - bins=30, - momentum=0.75, - use_sigmoid=True, - loss_weight=1.0), - loss_bbox=dict( - _delete_=True, - type='GHMR', - mu=0.02, - bins=10, - momentum=0.7, - loss_weight=10.0))) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py deleted file mode 100644 index 4aa00ece55280697fc67bd727077a8c9a58cfa44..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_1x_coco.py +++ /dev/null @@ -1,11 +0,0 @@ -_base_ = ['grid_rcnn_r50_fpn_gn-head_2x_coco.py'] -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[8, 11]) -checkpoint_config = dict(interval=1) -# runtime settings -runner = dict(type='EpochBasedRunner', max_epochs=12) diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/vfnet_head.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/vfnet_head.py deleted file mode 100644 index 7243bb62893839568ec51928d88a5ad40b02a66c..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/vfnet_head.py +++ /dev/null @@ -1,794 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init 
-from mmcv.ops import DeformConv2d -from mmcv.runner import force_fp32 - -from mmdet.core import (bbox2distance, bbox_overlaps, build_anchor_generator, - build_assigner, build_sampler, distance2bbox, - multi_apply, multiclass_nms, reduce_mean) -from ..builder import HEADS, build_loss -from .atss_head import ATSSHead -from .fcos_head import FCOSHead - -INF = 1e8 - - -@HEADS.register_module() -class VFNetHead(ATSSHead, FCOSHead): - """Head of `VarifocalNet (VFNet): An IoU-aware Dense Object - Detector.`_. - - The VFNet predicts IoU-aware classification scores which mix the - object presence confidence and object localization accuracy as the - detection score. It is built on the FCOS architecture and uses ATSS - for defining positive/negative training examples. The VFNet is trained - with Varifocal Loss and employs star-shaped deformable convolution to - extract features for a bbox. - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - regress_ranges (tuple[tuple[int, int]]): Regress range of multiple - level points. - center_sampling (bool): If true, use center sampling. Default: False. - center_sample_radius (float): Radius of center sampling. Default: 1.5. - sync_num_pos (bool): If true, synchronize the number of positive - examples across GPUs. Default: True. - gradient_mul (float): The multiplier to gradients from bbox refinement - and recognition. Default: 0.1. - bbox_norm_type (str): The bbox normalization type, 'reg_denom' or - 'stride'. Default: reg_denom - loss_cls_fl (dict): Config of focal loss. - use_vfl (bool): If true, use varifocal loss for training. - Default: True. - loss_cls (dict): Config of varifocal loss. - loss_bbox (dict): Config of localization loss, GIoU Loss. - loss_bbox_refine (dict): Config of localization refinement loss, GIoU Loss. - norm_cfg (dict): dictionary to construct and config norm layer. - Default: norm_cfg=dict(type='GN', num_groups=32, - requires_grad=True). - use_atss (bool): If true, use ATSS to define positive/negative - examples. Default: True. - anchor_generator (dict): Config of anchor generator for ATSS.
- - Example: - >>> self = VFNetHead(11, 7) - >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] - >>> cls_score, bbox_pred, bbox_pred_refine= self.forward(feats) - >>> assert len(cls_score) == len(self.scales) - """ # noqa: E501 - - def __init__(self, - num_classes, - in_channels, - regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), - (512, INF)), - center_sampling=False, - center_sample_radius=1.5, - sync_num_pos=True, - gradient_mul=0.1, - bbox_norm_type='reg_denom', - loss_cls_fl=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - use_vfl=True, - loss_cls=dict( - type='VarifocalLoss', - use_sigmoid=True, - alpha=0.75, - gamma=2.0, - iou_weighted=True, - loss_weight=1.0), - loss_bbox=dict(type='GIoULoss', loss_weight=1.5), - loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0), - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), - use_atss=True, - anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - octave_base_scale=8, - scales_per_octave=1, - center_offset=0.0, - strides=[8, 16, 32, 64, 128]), - **kwargs): - # dcn base offsets, adapted from reppoints_head.py - self.num_dconv_points = 9 - self.dcn_kernel = int(np.sqrt(self.num_dconv_points)) - self.dcn_pad = int((self.dcn_kernel - 1) / 2) - dcn_base = np.arange(-self.dcn_pad, - self.dcn_pad + 1).astype(np.float64) - dcn_base_y = np.repeat(dcn_base, self.dcn_kernel) - dcn_base_x = np.tile(dcn_base, self.dcn_kernel) - dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape( - (-1)) - self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1) - - super(FCOSHead, self).__init__( - num_classes, in_channels, norm_cfg=norm_cfg, **kwargs) - self.regress_ranges = regress_ranges - self.reg_denoms = [ - regress_range[-1] for regress_range in regress_ranges - ] - self.reg_denoms[-1] = self.reg_denoms[-2] * 2 - self.center_sampling = center_sampling - self.center_sample_radius = center_sample_radius - self.sync_num_pos = sync_num_pos - self.bbox_norm_type = bbox_norm_type - self.gradient_mul = gradient_mul - self.use_vfl = use_vfl - if self.use_vfl: - self.loss_cls = build_loss(loss_cls) - else: - self.loss_cls = build_loss(loss_cls_fl) - self.loss_bbox = build_loss(loss_bbox) - self.loss_bbox_refine = build_loss(loss_bbox_refine) - - # for getting ATSS targets - self.use_atss = use_atss - self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) - self.anchor_generator = build_anchor_generator(anchor_generator) - self.anchor_center_offset = anchor_generator['center_offset'] - self.num_anchors = self.anchor_generator.num_base_anchors[0] - self.sampling = False - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - - def _init_layers(self): - """Initialize layers of the head.""" - super(FCOSHead, self)._init_cls_convs() - super(FCOSHead, self)._init_reg_convs() - self.relu = nn.ReLU(inplace=True) - self.vfnet_reg_conv = ConvModule( - self.feat_channels, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - bias=self.conv_bias) - self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) - self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) - - self.vfnet_reg_refine_dconv = DeformConv2d( - self.feat_channels, - self.feat_channels, - self.dcn_kernel, - 1, - padding=self.dcn_pad) - self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, 
padding=1) - self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides]) - - self.vfnet_cls_dconv = DeformConv2d( - self.feat_channels, - self.feat_channels, - self.dcn_kernel, - 1, - padding=self.dcn_pad) - self.vfnet_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - - def init_weights(self): - """Initialize weights of the head.""" - for m in self.cls_convs: - if isinstance(m.conv, nn.Conv2d): - normal_init(m.conv, std=0.01) - for m in self.reg_convs: - if isinstance(m.conv, nn.Conv2d): - normal_init(m.conv, std=0.01) - normal_init(self.vfnet_reg_conv.conv, std=0.01) - normal_init(self.vfnet_reg, std=0.01) - normal_init(self.vfnet_reg_refine_dconv, std=0.01) - normal_init(self.vfnet_reg_refine, std=0.01) - normal_init(self.vfnet_cls_dconv, std=0.01) - bias_cls = bias_init_with_prob(0.01) - normal_init(self.vfnet_cls, std=0.01, bias=bias_cls) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: - cls_scores (list[Tensor]): Box iou-aware scores for each scale - level, each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box offsets for each - scale level, each is a 4D-tensor, the channel number is - num_points * 4. - bbox_preds_refine (list[Tensor]): Refined Box offsets for - each scale level, each is a 4D-tensor, the channel - number is num_points * 4. - """ - return multi_apply(self.forward_single, feats, self.scales, - self.scales_refine, self.strides, self.reg_denoms) - - def forward_single(self, x, scale, scale_refine, stride, reg_denom): - """Forward features of a single scale level. - - Args: - x (Tensor): FPN feature maps of the specified stride. - scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize - the bbox prediction. - scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to - resize the refined bbox prediction. - stride (int): The corresponding stride for feature maps, - used to normalize the bbox prediction when - bbox_norm_type = 'stride'. - reg_denom (int): The corresponding regression range for feature - maps, only used to normalize the bbox prediction when - bbox_norm_type = 'reg_denom'. - - Returns: - tuple: iou-aware cls scores for each box, bbox predictions and - refined bbox predictions of input feature maps. 
- """ - cls_feat = x - reg_feat = x - - for cls_layer in self.cls_convs: - cls_feat = cls_layer(cls_feat) - - for reg_layer in self.reg_convs: - reg_feat = reg_layer(reg_feat) - - # predict the bbox_pred of different level - reg_feat_init = self.vfnet_reg_conv(reg_feat) - if self.bbox_norm_type == 'reg_denom': - bbox_pred = scale( - self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom - elif self.bbox_norm_type == 'stride': - bbox_pred = scale( - self.vfnet_reg(reg_feat_init)).float().exp() * stride - else: - raise NotImplementedError - - # compute star deformable convolution offsets - # converting dcn_offset to reg_feat.dtype thus VFNet can be - # trained with FP16 - dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul, - stride).to(reg_feat.dtype) - - # refine the bbox_pred - reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset)) - bbox_pred_refine = scale_refine( - self.vfnet_reg_refine(reg_feat)).float().exp() - bbox_pred_refine = bbox_pred_refine * bbox_pred.detach() - - # predict the iou-aware cls score - cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset)) - cls_score = self.vfnet_cls(cls_feat) - - return cls_score, bbox_pred, bbox_pred_refine - - def star_dcn_offset(self, bbox_pred, gradient_mul, stride): - """Compute the star deformable conv offsets. - - Args: - bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, b). - gradient_mul (float): Gradient multiplier. - stride (int): The corresponding stride for feature maps, - used to project the bbox onto the feature map. - - Returns: - dcn_offsets (Tensor): The offsets for deformable convolution. - """ - dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred) - bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \ - gradient_mul * bbox_pred - # map to the feature map scale - bbox_pred_grad_mul = bbox_pred_grad_mul / stride - N, C, H, W = bbox_pred.size() - - x1 = bbox_pred_grad_mul[:, 0, :, :] - y1 = bbox_pred_grad_mul[:, 1, :, :] - x2 = bbox_pred_grad_mul[:, 2, :, :] - y2 = bbox_pred_grad_mul[:, 3, :, :] - bbox_pred_grad_mul_offset = bbox_pred.new_zeros( - N, 2 * self.num_dconv_points, H, W) - bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1 # -y1 - bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1 # -x1 - bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1 # -y1 - bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1 # -y1 - bbox_pred_grad_mul_offset[:, 5, :, :] = x2 # x2 - bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1 # -x1 - bbox_pred_grad_mul_offset[:, 11, :, :] = x2 # x2 - bbox_pred_grad_mul_offset[:, 12, :, :] = y2 # y2 - bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1 # -x1 - bbox_pred_grad_mul_offset[:, 14, :, :] = y2 # y2 - bbox_pred_grad_mul_offset[:, 16, :, :] = y2 # y2 - bbox_pred_grad_mul_offset[:, 17, :, :] = x2 # x2 - dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset - - return dcn_offset - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine')) - def loss(self, - cls_scores, - bbox_preds, - bbox_preds_refine, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute loss of the head. - - Args: - cls_scores (list[Tensor]): Box iou-aware scores for each scale - level, each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box offsets for each - scale level, each is a 4D-tensor, the channel number is - num_points * 4. - bbox_preds_refine (list[Tensor]): Refined Box offsets for - each scale level, each is a 4D-tensor, the channel - number is num_points * 4. 
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - Default: None. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine) - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, - bbox_preds[0].device) - labels, label_weights, bbox_targets, bbox_weights = self.get_targets( - cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas, - gt_bboxes_ignore) - - num_imgs = cls_scores[0].size(0) - # flatten cls_scores, bbox_preds and bbox_preds_refine - flatten_cls_scores = [ - cls_score.permute(0, 2, 3, - 1).reshape(-1, - self.cls_out_channels).contiguous() - for cls_score in cls_scores - ] - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous() - for bbox_pred in bbox_preds - ] - flatten_bbox_preds_refine = [ - bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous() - for bbox_pred_refine in bbox_preds_refine - ] - flatten_cls_scores = torch.cat(flatten_cls_scores) - flatten_bbox_preds = torch.cat(flatten_bbox_preds) - flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine) - flatten_labels = torch.cat(labels) - flatten_bbox_targets = torch.cat(bbox_targets) - # repeat points to align with bbox_preds - flatten_points = torch.cat( - [points.repeat(num_imgs, 1) for points in all_level_points]) - - # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes - bg_class_ind = self.num_classes - pos_inds = torch.where( - ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0] - num_pos = len(pos_inds) - - pos_bbox_preds = flatten_bbox_preds[pos_inds] - pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds] - pos_labels = flatten_labels[pos_inds] - - # sync num_pos across all gpus - if self.sync_num_pos: - num_pos_avg_per_gpu = reduce_mean( - pos_inds.new_tensor(num_pos).float()).item() - num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0) - else: - num_pos_avg_per_gpu = num_pos - - if num_pos > 0: - pos_bbox_targets = flatten_bbox_targets[pos_inds] - pos_points = flatten_points[pos_inds] - - pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds) - pos_decoded_target_preds = distance2bbox(pos_points, - pos_bbox_targets) - iou_targets_ini = bbox_overlaps( - pos_decoded_bbox_preds, - pos_decoded_target_preds.detach(), - is_aligned=True).clamp(min=1e-6) - bbox_weights_ini = iou_targets_ini.clone().detach() - iou_targets_ini_avg_per_gpu = reduce_mean( - bbox_weights_ini.sum()).item() - bbox_avg_factor_ini = max(iou_targets_ini_avg_per_gpu, 1.0) - loss_bbox = self.loss_bbox( - pos_decoded_bbox_preds, - pos_decoded_target_preds.detach(), - weight=bbox_weights_ini, - avg_factor=bbox_avg_factor_ini) - - pos_decoded_bbox_preds_refine = \ - distance2bbox(pos_points, pos_bbox_preds_refine) - iou_targets_rf = bbox_overlaps( - pos_decoded_bbox_preds_refine, - pos_decoded_target_preds.detach(), - is_aligned=True).clamp(min=1e-6) - bbox_weights_rf = iou_targets_rf.clone().detach() - iou_targets_rf_avg_per_gpu = reduce_mean( - bbox_weights_rf.sum()).item() - bbox_avg_factor_rf = max(iou_targets_rf_avg_per_gpu, 1.0) - 
loss_bbox_refine = self.loss_bbox_refine( - pos_decoded_bbox_preds_refine, - pos_decoded_target_preds.detach(), - weight=bbox_weights_rf, - avg_factor=bbox_avg_factor_rf) - - # build IoU-aware cls_score targets - if self.use_vfl: - pos_ious = iou_targets_rf.clone().detach() - cls_iou_targets = torch.zeros_like(flatten_cls_scores) - cls_iou_targets[pos_inds, pos_labels] = pos_ious - else: - loss_bbox = pos_bbox_preds.sum() * 0 - loss_bbox_refine = pos_bbox_preds_refine.sum() * 0 - if self.use_vfl: - cls_iou_targets = torch.zeros_like(flatten_cls_scores) - - if self.use_vfl: - loss_cls = self.loss_cls( - flatten_cls_scores, - cls_iou_targets, - avg_factor=num_pos_avg_per_gpu) - else: - loss_cls = self.loss_cls( - flatten_cls_scores, - flatten_labels, - weight=label_weights, - avg_factor=num_pos_avg_per_gpu) - - return dict( - loss_cls=loss_cls, - loss_bbox=loss_bbox, - loss_bbox_rf=loss_bbox_refine) - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine')) - def get_bboxes(self, - cls_scores, - bbox_preds, - bbox_preds_refine, - img_metas, - cfg=None, - rescale=None, - with_nms=True): - """Transform network outputs for a batch into bbox predictions. - - Args: - cls_scores (list[Tensor]): Box iou-aware scores for each scale - level with shape (N, num_points * num_classes, H, W). - bbox_preds (list[Tensor]): Box offsets for each scale - level with shape (N, num_points * 4, H, W). - bbox_preds_refine (list[Tensor]): Refined Box offsets for - each scale level with shape (N, num_points * 4, H, W). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - cfg (mmcv.Config): Test / postprocessing configuration, - if None, test_cfg would be used. Default: None. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before returning boxes. - Default: True. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is an (n, 5) tensor, where the first 4 columns - are bounding box positions (tl_x, tl_y, br_x, br_y) and the - 5-th column is a score between 0 and 1. The second item is a - (n,) tensor where each item is the predicted class label of - the corresponding box. - """ - assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine) - num_levels = len(cls_scores) - - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, - bbox_preds[0].device) - result_list = [] - for img_id in range(len(img_metas)): - cls_score_list = [ - cls_scores[i][img_id].detach() for i in range(num_levels) - ] - bbox_pred_list = [ - bbox_preds_refine[i][img_id].detach() - for i in range(num_levels) - ] - img_shape = img_metas[img_id]['img_shape'] - scale_factor = img_metas[img_id]['scale_factor'] - det_bboxes = self._get_bboxes_single(cls_score_list, - bbox_pred_list, mlvl_points, - img_shape, scale_factor, cfg, - rescale, with_nms) - result_list.append(det_bboxes) - return result_list - - def _get_bboxes_single(self, - cls_scores, - bbox_preds, - mlvl_points, - img_shape, - scale_factor, - cfg, - rescale=False, - with_nms=True): - """Transform outputs for a single batch item into bbox predictions. - - Args: - cls_scores (list[Tensor]): Box iou-aware scores for a single scale - level with shape (num_points * num_classes, H, W). - bbox_preds (list[Tensor]): Box offsets for a single scale - level with shape (num_points * 4, H, W). 
- mlvl_points (list[Tensor]): Box reference for a single scale level - with shape (num_total_points, 4). - img_shape (tuple[int]): Shape of the input image, - (height, width, 3). - scale_factor (ndarray): Scale factor of the image arrange as - (w_scale, h_scale, w_scale, h_scale). - cfg (mmcv.Config | None): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before returning boxes. - Default: True. - - Returns: - tuple(Tensor): - det_bboxes (Tensor): BBox predictions in shape (n, 5), where - the first 4 columns are bounding box positions - (tl_x, tl_y, br_x, br_y) and the 5-th column is a score - between 0 and 1. - det_labels (Tensor): A (n,) tensor where each item is the - predicted class label of the corresponding box. - """ - cfg = self.test_cfg if cfg is None else cfg - assert len(cls_scores) == len(bbox_preds) == len(mlvl_points) - mlvl_bboxes = [] - mlvl_scores = [] - for cls_score, bbox_pred, points in zip(cls_scores, bbox_preds, - mlvl_points): - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - scores = cls_score.permute(1, 2, 0).reshape( - -1, self.cls_out_channels).contiguous().sigmoid() - bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).contiguous() - - nms_pre = cfg.get('nms_pre', -1) - if 0 < nms_pre < scores.shape[0]: - max_scores, _ = scores.max(dim=1) - _, topk_inds = max_scores.topk(nms_pre) - points = points[topk_inds, :] - bbox_pred = bbox_pred[topk_inds, :] - scores = scores[topk_inds, :] - bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape) - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_bboxes = torch.cat(mlvl_bboxes) - if rescale: - mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) - mlvl_scores = torch.cat(mlvl_scores) - padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) - # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 - # BG cat_id: num_class - mlvl_scores = torch.cat([mlvl_scores, padding], dim=1) - if with_nms: - det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, - cfg.score_thr, cfg.nms, - cfg.max_per_img) - return det_bboxes, det_labels - else: - return mlvl_bboxes, mlvl_scores - - def _get_points_single(self, - featmap_size, - stride, - dtype, - device, - flatten=False): - """Get points according to feature map sizes.""" - h, w = featmap_size - x_range = torch.arange( - 0, w * stride, stride, dtype=dtype, device=device) - y_range = torch.arange( - 0, h * stride, stride, dtype=dtype, device=device) - y, x = torch.meshgrid(y_range, x_range) - # to be compatible with anchor points in ATSS - if self.use_atss: - points = torch.stack( - (x.reshape(-1), y.reshape(-1)), dim=-1) + \ - stride * self.anchor_center_offset - else: - points = torch.stack( - (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2 - return points - - def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels, - img_metas, gt_bboxes_ignore): - """A wrapper for computing ATSS and FCOS targets for points in multiple - images. - - Args: - cls_scores (list[Tensor]): Box iou-aware scores for each scale - level with shape (N, num_points * num_classes, H, W). - mlvl_points (list[Tensor]): Points of each fpn level, each has - shape (num_points, 2). - gt_bboxes (list[Tensor]): Ground truth bboxes of each image, - each has shape (num_gt, 4). - gt_labels (list[Tensor]): Ground truth labels of each box, - each has shape (num_gt,). 
- img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). - - Returns: - tuple: - labels_list (list[Tensor]): Labels of each level. - label_weights (Tensor/None): Label weights of all levels. - bbox_targets_list (list[Tensor]): Regression targets of each - level, (l, t, r, b). - bbox_weights (Tensor/None): Bbox weights of all levels. - """ - if self.use_atss: - return self.get_atss_targets(cls_scores, mlvl_points, gt_bboxes, - gt_labels, img_metas, - gt_bboxes_ignore) - else: - self.norm_on_bbox = False - return self.get_fcos_targets(mlvl_points, gt_bboxes, gt_labels) - - def _get_target_single(self, *args, **kwargs): - """Avoid ambiguity in multiple inheritance.""" - if self.use_atss: - return ATSSHead._get_target_single(self, *args, **kwargs) - else: - return FCOSHead._get_target_single(self, *args, **kwargs) - - def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list): - """Compute FCOS regression and classification targets for points in - multiple images. - - Args: - points (list[Tensor]): Points of each fpn level, each has shape - (num_points, 2). - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, - each has shape (num_gt, 4). - gt_labels_list (list[Tensor]): Ground truth labels of each box, - each has shape (num_gt,). - - Returns: - tuple: - labels (list[Tensor]): Labels of each level. - label_weights: None, to be compatible with ATSS targets. - bbox_targets (list[Tensor]): BBox targets of each level. - bbox_weights: None, to be compatible with ATSS targets. - """ - labels, bbox_targets = FCOSHead.get_targets(self, points, - gt_bboxes_list, - gt_labels_list) - label_weights = None - bbox_weights = None - return labels, label_weights, bbox_targets, bbox_weights - - def get_atss_targets(self, - cls_scores, - mlvl_points, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """A wrapper for computing ATSS targets for points in multiple images. - - Args: - cls_scores (list[Tensor]): Box iou-aware scores for each scale - level with shape (N, num_points * num_classes, H, W). - mlvl_points (list[Tensor]): Points of each fpn level, each has - shape (num_points, 2). - gt_bboxes (list[Tensor]): Ground truth bboxes of each image, - each has shape (num_gt, 4). - gt_labels (list[Tensor]): Ground truth labels of each box, - each has shape (num_gt,). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be - ignored, shape (num_ignored_gts, 4). Default: None. - - Returns: - tuple: - labels_list (list[Tensor]): Labels of each level. - label_weights (Tensor): Label weights of all levels. - bbox_targets_list (list[Tensor]): Regression targets of each - level, (l, t, r, b). - bbox_weights (Tensor): Bbox weights of all levels. 
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.anchor_generator.num_levels - - device = cls_scores[0].device - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - - cls_reg_targets = ATSSHead.get_targets( - self, - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels, - unmap_outputs=True) - if cls_reg_targets is None: - return None - - (anchor_list, labels_list, label_weights_list, bbox_targets_list, - bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets - - bbox_targets_list = [ - bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list - ] - - num_imgs = len(img_metas) - # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format - bbox_targets_list = self.transform_bbox_targets( - bbox_targets_list, mlvl_points, num_imgs) - - labels_list = [labels.reshape(-1) for labels in labels_list] - label_weights_list = [ - label_weights.reshape(-1) for label_weights in label_weights_list - ] - bbox_weights_list = [ - bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list - ] - label_weights = torch.cat(label_weights_list) - bbox_weights = torch.cat(bbox_weights_list) - return labels_list, label_weights, bbox_targets_list, bbox_weights - - def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs): - """Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format. - - Args: - decoded_bboxes (list[Tensor]): Regression targets of each level, - in the form of (x1, y1, x2, y2). - mlvl_points (list[Tensor]): Points of each fpn level, each has - shape (num_points, 2). - num_imgs (int): the number of images in a batch. - - Returns: - bbox_targets (list[Tensor]): Regression targets of each level in - the form of (l, t, r, b). 
- """ - # TODO: Re-implemented in Class PointCoder - assert len(decoded_bboxes) == len(mlvl_points) - num_levels = len(decoded_bboxes) - mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points] - bbox_targets = [] - for i in range(num_levels): - bbox_target = bbox2distance(mlvl_points[i], decoded_bboxes[i]) - bbox_targets.append(bbox_target) - - return bbox_targets - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - """Override the method in the parent class to avoid changing para's - name.""" - pass diff --git a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/transformer_ops/position_embedding.py b/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/transformer_ops/position_embedding.py deleted file mode 100644 index 28e6e9166c02277dd398a68663d2e9b65d4ff4d1..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/model/transformer_ops/position_embedding.py +++ /dev/null @@ -1,82 +0,0 @@ -import torch -import torch.nn as nn -import math - - -###################################################################################### -# position embedding -###################################################################################### -class PositionEmbeddingLearned(nn.Module): - """ - This is a learned version of the position embedding - """ - def __init__(self, num_pos_feats=256): - super().__init__() - self.row_embed = nn.Embedding(32, num_pos_feats) - self.col_embed = nn.Embedding(32, num_pos_feats) - self.reset_parameters() - - def reset_parameters(self): - nn.init.uniform_(self.row_embed.weight) - nn.init.uniform_(self.col_embed.weight) - - def forward(self, x, mask): - h, w = x.shape[-2:] - i = torch.arange(w, device=x.device) - j = torch.arange(h, device=x.device) - x_emb = self.col_embed(i).unsqueeze(0).repeat(h, 1, 1) - y_emb = self.row_embed(j).unsqueeze(1).repeat(1, w, 1) - pos = (x_emb + y_emb).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1) - return pos - - -class PositionEmbeddingSine(nn.Module): - """ - This is a standard version of the position embedding, very similar to the one used by the - "Attention is all you need" paper, generalized to work on examples - """ - def __init__(self, feats_dim=512, temperature=10000, normalize=False, scale=None): - """ - explicitly encode the position using the sinusoid: - PE(pos,2i) = sin(pos/temperature^(2*i/d_model)) - PE(pos,2i+1) = cos(pos/temperature^(2*i/d_model)) - :param feats_dim: the dimension of features, each dimension of the positional embedding to a sinusoid - :param temperature: wavelengths from a geometric progression from scale - :param normalize: whether to normalize the position to (0,1) - :param scale: scale for the position embedding - """ - super(PositionEmbeddingSine, self).__init__() - self.feats_dim = feats_dim - self.T = temperature - self.norm = normalize - if scale is None: - scale = 2 * math.pi - self.scale = scale - - def forward(self, x, mask): - x_embed = mask.cumsum(1, dtype=torch.float32) - y_embed = mask.cumsum(2, dtype=torch.float32) - if self.norm: - eps = 1e-5 - x_embed = x_embed / (x_embed[:, -1:, :] + eps) * self.scale - y_embed = y_embed / (y_embed[:, :, -1:] + eps) * self.scale - - dim_t = torch.arange(self.feats_dim, dtype=torch.float32, device=x.device) - dim_t = self.T ** (2*(dim_t//2)/self.feats_dim) - pos_x = x_embed[:, :, :, None] / dim_t - pos_y = y_embed[:, :, :, None] / dim_t - - pos_x[:, :, :, 0::2], pos_x[:, :, 
:, 1::2] = pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos() - pos_y[:, :, :, 0::2], pos_y[:, :, :, 1::2] = pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos() - pos = (pos_x + pos_y).permute(0, 3, 1, 2) * 0.5 - return pos - - -def build_position_embed(embed_type='learned', feats_dim=512, temperature=10000): - if embed_type == 'sine': - pos_embed = PositionEmbeddingSine(feats_dim, temperature, normalize=True) - elif embed_type == 'learned': - pos_embed = PositionEmbeddingLearned(feats_dim) - else: - raise ValueError(f"nor supported {embed_type}") - return pos_embed diff --git a/spaces/ArkanDash/rvc-models-new/lib/infer_pack/onnx_inference.py b/spaces/ArkanDash/rvc-models-new/lib/infer_pack/onnx_inference.py deleted file mode 100644 index 6517853be49e61c427cf7cd9b5ed203f6d5f367e..0000000000000000000000000000000000000000 --- a/spaces/ArkanDash/rvc-models-new/lib/infer_pack/onnx_inference.py +++ /dev/null @@ -1,145 +0,0 @@ -import onnxruntime -import librosa -import numpy as np -import soundfile - - -class ContentVec: - def __init__(self, vec_path="pretrained/vec-768-layer-12.onnx", device=None): - print("load model(s) from {}".format(vec_path)) - if device == "cpu" or device is None: - providers = ["CPUExecutionProvider"] - elif device == "cuda": - providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] - elif device == "dml": - providers = ["DmlExecutionProvider"] - else: - raise RuntimeError("Unsportted Device") - self.model = onnxruntime.InferenceSession(vec_path, providers=providers) - - def __call__(self, wav): - return self.forward(wav) - - def forward(self, wav): - feats = wav - if feats.ndim == 2: # double channels - feats = feats.mean(-1) - assert feats.ndim == 1, feats.ndim - feats = np.expand_dims(np.expand_dims(feats, 0), 0) - onnx_input = {self.model.get_inputs()[0].name: feats} - logits = self.model.run(None, onnx_input)[0] - return logits.transpose(0, 2, 1) - - -def get_f0_predictor(f0_predictor, hop_length, sampling_rate, **kargs): - if f0_predictor == "pm": - from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor - - f0_predictor_object = PMF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - elif f0_predictor == "harvest": - from lib.infer_pack.modules.F0Predictor.HarvestF0Predictor import ( - HarvestF0Predictor, - ) - - f0_predictor_object = HarvestF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - elif f0_predictor == "dio": - from lib.infer_pack.modules.F0Predictor.DioF0Predictor import DioF0Predictor - - f0_predictor_object = DioF0Predictor( - hop_length=hop_length, sampling_rate=sampling_rate - ) - else: - raise Exception("Unknown f0 predictor") - return f0_predictor_object - - -class OnnxRVC: - def __init__( - self, - model_path, - sr=40000, - hop_size=512, - vec_path="vec-768-layer-12", - device="cpu", - ): - vec_path = f"pretrained/{vec_path}.onnx" - self.vec_model = ContentVec(vec_path, device) - if device == "cpu" or device is None: - providers = ["CPUExecutionProvider"] - elif device == "cuda": - providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] - elif device == "dml": - providers = ["DmlExecutionProvider"] - else: - raise RuntimeError("Unsportted Device") - self.model = onnxruntime.InferenceSession(model_path, providers=providers) - self.sampling_rate = sr - self.hop_size = hop_size - - def forward(self, hubert, hubert_length, pitch, pitchf, ds, rnd): - onnx_input = { - self.model.get_inputs()[0].name: hubert, - self.model.get_inputs()[1].name: hubert_length, - 
self.model.get_inputs()[2].name: pitch, - self.model.get_inputs()[3].name: pitchf, - self.model.get_inputs()[4].name: ds, - self.model.get_inputs()[5].name: rnd, - } - return (self.model.run(None, onnx_input)[0] * 32767).astype(np.int16) - - def inference( - self, - raw_path, - sid, - f0_method="dio", - f0_up_key=0, - pad_time=0.5, - cr_threshold=0.02, - ): - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - f0_predictor = get_f0_predictor( - f0_method, - hop_length=self.hop_size, - sampling_rate=self.sampling_rate, - threshold=cr_threshold, - ) - wav, sr = librosa.load(raw_path, sr=self.sampling_rate) - org_length = len(wav) - if org_length / sr > 50.0: - raise RuntimeError("Reached Max Length") - - wav16k = librosa.resample(wav, orig_sr=self.sampling_rate, target_sr=16000) - wav16k = wav16k - - hubert = self.vec_model(wav16k) - hubert = np.repeat(hubert, 2, axis=2).transpose(0, 2, 1).astype(np.float32) - hubert_length = hubert.shape[1] - - pitchf = f0_predictor.compute_f0(wav, hubert_length) - pitchf = pitchf * 2 ** (f0_up_key / 12) - pitch = pitchf.copy() - f0_mel = 1127 * np.log(1 + pitch / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - pitch = np.rint(f0_mel).astype(np.int64) - - pitchf = pitchf.reshape(1, len(pitchf)).astype(np.float32) - pitch = pitch.reshape(1, len(pitch)) - ds = np.array([sid]).astype(np.int64) - - rnd = np.random.randn(1, 192, hubert_length).astype(np.float32) - hubert_length = np.array([hubert_length]).astype(np.int64) - - out_wav = self.forward(hubert, hubert_length, pitch, pitchf, ds, rnd).squeeze() - out_wav = np.pad(out_wav, (0, 2 * self.hop_size), "constant") - return out_wav[0:org_length] diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/build/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/measure.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/measure.py deleted file mode 100644 index a508ffa80bd715b47c190ed9d747dbc388fa5b19..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/measure.py +++ /dev/null @@ -1,151 +0,0 @@ -from operator import itemgetter -from typing import TYPE_CHECKING, Callable, NamedTuple, Optional, Sequence - -from . import errors -from .protocol import is_renderable, rich_cast - -if TYPE_CHECKING: - from .console import Console, ConsoleOptions, RenderableType - - -class Measurement(NamedTuple): - """Stores the minimum and maximum widths (in characters) required to render an object.""" - - minimum: int - """Minimum number of cells required to render.""" - maximum: int - """Maximum number of cells required to render.""" - - @property - def span(self) -> int: - """Get difference between maximum and minimum.""" - return self.maximum - self.minimum - - def normalize(self) -> "Measurement": - """Get measurement that ensures that minimum <= maximum and minimum >= 0 - - Returns: - Measurement: A normalized measurement. 
- """ - minimum, maximum = self - minimum = min(max(0, minimum), maximum) - return Measurement(max(0, minimum), max(0, max(minimum, maximum))) - - def with_maximum(self, width: int) -> "Measurement": - """Get a RenderableWith where the widths are <= width. - - Args: - width (int): Maximum desired width. - - Returns: - Measurement: New Measurement object. - """ - minimum, maximum = self - return Measurement(min(minimum, width), min(maximum, width)) - - def with_minimum(self, width: int) -> "Measurement": - """Get a RenderableWith where the widths are >= width. - - Args: - width (int): Minimum desired width. - - Returns: - Measurement: New Measurement object. - """ - minimum, maximum = self - width = max(0, width) - return Measurement(max(minimum, width), max(maximum, width)) - - def clamp( - self, min_width: Optional[int] = None, max_width: Optional[int] = None - ) -> "Measurement": - """Clamp a measurement within the specified range. - - Args: - min_width (int): Minimum desired width, or ``None`` for no minimum. Defaults to None. - max_width (int): Maximum desired width, or ``None`` for no maximum. Defaults to None. - - Returns: - Measurement: New Measurement object. - """ - measurement = self - if min_width is not None: - measurement = measurement.with_minimum(min_width) - if max_width is not None: - measurement = measurement.with_maximum(max_width) - return measurement - - @classmethod - def get( - cls, console: "Console", options: "ConsoleOptions", renderable: "RenderableType" - ) -> "Measurement": - """Get a measurement for a renderable. - - Args: - console (~rich.console.Console): Console instance. - options (~rich.console.ConsoleOptions): Console options. - renderable (RenderableType): An object that may be rendered with Rich. - - Raises: - errors.NotRenderableError: If the object is not renderable. - - Returns: - Measurement: Measurement object containing range of character widths required to render the object. - """ - _max_width = options.max_width - if _max_width < 1: - return Measurement(0, 0) - if isinstance(renderable, str): - renderable = console.render_str( - renderable, markup=options.markup, highlight=False - ) - renderable = rich_cast(renderable) - if is_renderable(renderable): - get_console_width: Optional[ - Callable[["Console", "ConsoleOptions"], "Measurement"] - ] = getattr(renderable, "__rich_measure__", None) - if get_console_width is not None: - render_width = ( - get_console_width(console, options) - .normalize() - .with_maximum(_max_width) - ) - if render_width.maximum < 1: - return Measurement(0, 0) - return render_width.normalize() - else: - return Measurement(0, _max_width) - else: - raise errors.NotRenderableError( - f"Unable to get render width for {renderable!r}; " - "a str, Segment, or object with __rich_console__ method is required" - ) - - -def measure_renderables( - console: "Console", - options: "ConsoleOptions", - renderables: Sequence["RenderableType"], -) -> "Measurement": - """Get a measurement that would fit a number of renderables. - - Args: - console (~rich.console.Console): Console instance. - options (~rich.console.ConsoleOptions): Console options. - renderables (Iterable[RenderableType]): One or more renderable objects. - - Returns: - Measurement: Measurement object containing range of character widths required to - contain all given renderables. 
- """ - if not renderables: - return Measurement(0, 0) - get_measurement = Measurement.get - measurements = [ - get_measurement(console, options, renderable) for renderable in renderables - ] - measured_width = Measurement( - max(measurements, key=itemgetter(0)).minimum, - max(measurements, key=itemgetter(1)).maximum, - ) - return measured_width diff --git a/spaces/AutoBG/Auto-BoardGame/description_generator.py b/spaces/AutoBG/Auto-BoardGame/description_generator.py deleted file mode 100644 index a1106fb9150cf3f143185a7610065ce0ea96b924..0000000000000000000000000000000000000000 --- a/spaces/AutoBG/Auto-BoardGame/description_generator.py +++ /dev/null @@ -1,119 +0,0 @@ - -import numpy as np -import re -import spacy -import openai -from operator import itemgetter -#user input manager class -class input_manager: - - #initialize key dictionary from vector data frame - def __init__(self,key_df, slim_df, search_tokens): - self.key_df = key_df - self.slim_df = slim_df - self.search_tokens = search_tokens - self.key = dict(zip(list(key_df.columns),np.zeros(len(key_df.columns)))) - self.nlp = spacy.load("en_core_web_md") - - #translate input text to vector - def set_input(self,input_cats): - #need setup to apply correct group tag to values - #separate known/unknown features - k_flags = [cat for cat in input_cats if cat in list(self.key.keys())] - unk_flags = [cat for cat in input_cats if cat not in list(self.key.keys())] - - #process within feature class similarity for each unknown input - if len(unk_flags)>0: - - outs = [] - for word in unk_flags: - if re.match(r"game_type_",word): - tok = self.nlp(word.split("_")[-1]) - mtch = max([(key,key.similarity(tok)) for key in self.search_tokens[0]],key=itemgetter(1)) - #if no known match is found (model doesn't recognize input word), we're going to discard - other solutions performance prohibitive - if mtch[1]>0: - outs.append("game_type_"+mtch[0]) - elif re.match(r"mechanic_",word): - tok = self.nlp(word.split("_")[-1]) - mtch = max([(key,key.similarity(tok)) for key in self.search_tokens[1]],key=itemgetter(1)) - if mtch[1]>0: - outs.append("mechanic_"+mtch[0]) - elif re.match(r"category_",word): - tok = self.nlp(word.split("_")[-1]) - mtch=max([(key,key.similarity(tok)) for key in self.search_tokens[2]],key=itemgetter(1)) - if mtch[1]>0: - outs.append("category_"+mtch[0]) - elif re.match(r"family_",word): - tok = self.nlp(word.split("_")[-1]) - mtch=max([(key,key.similarity(tok)) for key in self.search_tokens[3]],key=itemgetter(1)) - if mtch[1]>0: - outs.append("family_"+str(mtch[0])) - - #if unks are processed, rejoin nearest match to known. - k_flags = list(set(k_flags+outs)) - - #preserve global key and ouput copy w/input keys activated to 1 - d = self.key.copy() - for cat in k_flags: - d[cat] = 1.0 - - # DELETE ME - return d - - def input_parser(self,in_vec): - #extracting keys from processed vector - ks = [k for k,v in in_vec.items() if v == 1] - - return ks - -class model_control: - def __init__(self, apikey, model_id): - self.api_key = apikey - openai.api_key = self.api_key - - self.prompt = None - - self.model = openai.FineTune.retrieve(id=model_id).fine_tuned_model - - def prompt_formatter(self,ks): - self.prompt = ". 
".join(ks) + "\n\n###\n\n" - - - - def call_api(self,status=0): - if status == 0: - temp=0.5 - pres=0.7 - elif status == 1: - temp=0.4 - pres=0.6 - elif status == 2: - temp=0.5 - pres=0.8 - - answer = openai.Completion.create( - model=self.model, - prompt=self.prompt, - max_tokens=512, - temperature=temp, - stop=["END"], - presence_penalty=pres, - frequency_penalty=0.5 - ) - return answer['choices'][0]['text'] - - def resp_cleanup(self,text): - - if ((text[-1] != "!") & (text[-1] != ".") & (text[-1] != "?")): - text = " ".join([e+'.' for e in text.split('.')[0:-1] if e]) - - sent = re.split(r'([.?!:])', text) - phrases = ["[Dd]esigned by","[Dd]esigner of","[Aa]rt by","[Aa]rtist of","[Pp]ublished","[Pp]ublisher of"] - - pat = re.compile("(?:" + "|".join(phrases) + ")") - fix = re.compile("(?<=[.!?])[.!?]") - - text = re.sub(fix,'',''.join([s for s in sent if pat.search(s) == None])) - - - return text diff --git a/spaces/AutoLLM/AutoAgents/autoagents/utils/logger.py b/spaces/AutoLLM/AutoAgents/autoagents/utils/logger.py deleted file mode 100644 index ea6f6f9a894db245368bdaecb90faa266547a82e..0000000000000000000000000000000000000000 --- a/spaces/AutoLLM/AutoAgents/autoagents/utils/logger.py +++ /dev/null @@ -1,60 +0,0 @@ -import os -import json -from typing import Dict, Any -import uuid -from datetime import datetime -import pytz - -import huggingface_hub -from huggingface_hub import Repository - - -class InteractionsLogger: - def __init__(self, name: str, persist=False): - self.persist = persist - self.counter = 0 - self.name = name # unique id - HF_TOKEN = os.environ.get("HF_TOKEN") - HF_DATASET_REPO_URL = os.environ.get("HF_DATASET_REPO_URL") - if (HF_TOKEN is not None) and (HF_DATASET_REPO_URL is not None): - self.repo = Repository( - local_dir="data", clone_from=HF_DATASET_REPO_URL, use_auth_token=HF_TOKEN - ) - else: - self.persist = False - - def set_goal(self, goal: str): - # Initialize two variables for saving two files (self.messages for - # training and self.structure_data for later use) - self.messages = [{"goal": goal}] - self.structured_data = {"goal": goal} - - def add_system(self, more: Dict): - self.convos = [{"from": "system"} | more] - - def add_ai(self, msg: str): - self.convos.append({"from": "ai", "value": msg}) - self.messages.append({"id": f"{self.name}_{self.counter}", "conversations": self.convos}) - self.counter += 1 - - def add_structured_data(self, data: Dict[str, Any]): - self.structured_data.update({f"turn_{self.counter}": data}) - - def add_message(self, data: Dict[str, Any]): - self.structured_data.update(data) - - def save(self): - # add current datetime - self.add_message({"datetime": datetime.now(pytz.utc).strftime("%m/%d/%Y %H:%M:%S %Z%z")}) - if self.persist: - # TODO: want to add retry in a loop? 
- self.repo.git_pull() - fname = uuid.uuid4().hex[:16] - with open(f"./data/{fname}.json", "w") as f: - json.dump(self.messages, f, indent=2) - with open(f"./data/{fname}.clean.json", "w") as f: - json.dump(self.structured_data, f, indent=2) - commit_url = self.repo.push_to_hub() - - def add_cost(self, cost): - self.messages.append({"metrics": cost}) \ No newline at end of file diff --git a/spaces/Awesimo/jojogan/e4e/scripts/calc_losses_on_images.py b/spaces/Awesimo/jojogan/e4e/scripts/calc_losses_on_images.py deleted file mode 100644 index 32b6bcee854da7ae357daf82bd986f30db9fb72c..0000000000000000000000000000000000000000 --- a/spaces/Awesimo/jojogan/e4e/scripts/calc_losses_on_images.py +++ /dev/null @@ -1,87 +0,0 @@ -from argparse import ArgumentParser -import os -import json -import sys -from tqdm import tqdm -import numpy as np -import torch -from torch.utils.data import DataLoader -import torchvision.transforms as transforms - -sys.path.append(".") -sys.path.append("..") - -from criteria.lpips.lpips import LPIPS -from datasets.gt_res_dataset import GTResDataset - - -def parse_args(): - parser = ArgumentParser(add_help=False) - parser.add_argument('--mode', type=str, default='lpips', choices=['lpips', 'l2']) - parser.add_argument('--data_path', type=str, default='results') - parser.add_argument('--gt_path', type=str, default='gt_images') - parser.add_argument('--workers', type=int, default=4) - parser.add_argument('--batch_size', type=int, default=4) - parser.add_argument('--is_cars', action='store_true') - args = parser.parse_args() - return args - - -def run(args): - resize_dims = (256, 256) - if args.is_cars: - resize_dims = (192, 256) - transform = transforms.Compose([transforms.Resize(resize_dims), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]) - - print('Loading dataset') - dataset = GTResDataset(root_path=args.data_path, - gt_dir=args.gt_path, - transform=transform) - - dataloader = DataLoader(dataset, - batch_size=args.batch_size, - shuffle=False, - num_workers=int(args.workers), - drop_last=True) - - if args.mode == 'lpips': - loss_func = LPIPS(net_type='alex') - elif args.mode == 'l2': - loss_func = torch.nn.MSELoss() - else: - raise Exception('Not a valid mode!') - loss_func.cuda() - - global_i = 0 - scores_dict = {} - all_scores = [] - for result_batch, gt_batch in tqdm(dataloader): - for i in range(args.batch_size): - loss = float(loss_func(result_batch[i:i + 1].cuda(), gt_batch[i:i + 1].cuda())) - all_scores.append(loss) - im_path = dataset.pairs[global_i][0] - scores_dict[os.path.basename(im_path)] = loss - global_i += 1 - - all_scores = list(scores_dict.values()) - mean = np.mean(all_scores) - std = np.std(all_scores) - result_str = 'Average loss is {:.2f}+-{:.2f}'.format(mean, std) - print('Finished with ', args.data_path) - print(result_str) - - out_path = os.path.join(os.path.dirname(args.data_path), 'inference_metrics') - if not os.path.exists(out_path): - os.makedirs(out_path) - - with open(os.path.join(out_path, 'stat_{}.txt'.format(args.mode)), 'w') as f: - f.write(result_str) - with open(os.path.join(out_path, 'scores_{}.json'.format(args.mode)), 'w') as f: - json.dump(scores_dict, f) - - -if __name__ == '__main__': - args = parse_args() - run(args) diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/data/coco_keypoint.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/data/coco_keypoint.py deleted file mode 100644 index 
b4ceb066faf696954244205dc75376b767071217..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/data/coco_keypoint.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from detectron2.data.detection_utils import create_keypoint_hflip_indices
-
-from .coco import dataloader
-
-dataloader.train.dataset.min_keypoints = 1
-dataloader.train.dataset.names = "keypoints_coco_2017_train"
-dataloader.test.dataset.names = "keypoints_coco_2017_val"
-
-dataloader.train.mapper.update(
-    use_instance_mask=False,
-    use_keypoint=True,
-    keypoint_hflip_indices=create_keypoint_hflip_indices(dataloader.train.dataset.names),
-)
diff --git a/spaces/Benson/text-generation/Examples/Blockman Ir Nueva Versin Apk.md b/spaces/Benson/text-generation/Examples/Blockman Ir Nueva Versin Apk.md
deleted file mode 100644
index a975036c9735c8e8eda08d693b9dbf70cf9672bd..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Blockman Ir Nueva Versin Apk.md
+++ /dev/null
@@ -1,71 +0,0 @@
-<br />
      -

Blockman GO New Version APK: Everything You Need to Know

      -

Are you looking for a fun and exciting app that lets you play different games, chat with friends, and create your own worlds? If so, you may want to check out the Blockman GO new version APK. It is a free app that offers plenty of features and content for players of all ages and preferences. In this article, we will tell you everything you need to know about the Blockman GO new version APK, including what it is, what is new in the latest version, how to download and install it, and why you should play it.

      -

blockman go new version apk


DOWNLOAD: https://byltly.com/2v6MEL



      -

What is Blockman GO?

-

Blockman GO is a free app that lets you play a variety of block-style minigames, chat with other players, and make friends. You can also create and share your own games using the built-in editor. Blockman GO has a simple, easy-to-use interface and supports several languages. You can download Blockman GO from the Google Play Store or the official website.

      -

A free app with minigames, chat, and friends

-

One of the main features of Blockman GO is that it offers a wide range of minigames you can play with other players online. Some of the popular minigames are Bed Wars, Sky Wars, Murder Mystery, Egg Wars, Build Battle, Parkour, and more. Each minigame has its own rules, objectives, and rewards. You can also chat with other players in the game lobby or in-game. You can befriend other players by sending them friend requests or joining their clubs, and you can invite your friends to play with you in private rooms.

      -

A platform to create and share your own games

      - -

What is new in the latest version of Blockman GO?

      -

The latest version of Blockman GO is Garena Blockman GO, a collaboration with Garena Free Fire, one of the most popular battle royale games in the world. Garena Blockman GO introduces some new features and improvements to the app, such as:

      -

Garena Blockman GO: a collaboration with Garena Free Fire

      -

Garena Blockman GO is a special version of Blockman GO that features some elements from Garena Free Fire. For example, you can get exclusive Garena Free Fire skins and items in Blockman GO, and you can take part in events and activities related to Garena Free Fire.

      -

Frontline: a new 30 vs 30 multiplayer shooter

      -

One of the new minigames in Garena Blockman GO is Frontline, a 30 vs 30 multiplayer shooter. You can join either the blue team or the red team and fight the enemy team on a large map, using various weapons, vehicles, and tactics to win the match. You also earn coins and experience points by playing Frontline.

      -

Other features and improvements

      -

Garena Blockman GO also brings some other features and improvements to the app, such as:

-

• A new, more colorful and dynamic user interface.
• A new ranking system that shows your level and progress in different minigames.
• A new chat system that supports voice and text messages.
• A new reward system that gives you daily login bonuses, giveaways, and achievements.
• A new shop system that lets you buy and sell items using coins or diamonds.
      • -
      -

How to download and install the Blockman GO new version APK?

      -

If you want to download and install the Blockman GO new version APK, you can follow these steps:

      -

Steps to download and install from the official website

-

1. Go to the official Blockman GO website and click the "Download" button.
2. Wait for the download to finish, then open the APK file.
3. Allow installation from unknown sources if your device asks for it.
4. Follow the on-screen instructions to complete the installation.
5. Launch the app and enjoy playing the Blockman GO new version APK.
      -

Tips to avoid malware and viruses

      -

When downloading and installing the Blockman GO new version APK, you should watch out for potential risks such as malware and viruses. Here are some tips to avoid them (see the short sketch after this list):

-

• Only download the APK file from the official website or the Google Play Store. Do not trust third-party sources that claim to offer the APK file.
• Scan the APK file with reliable antivirus software before opening it.
• Do not grant the app unnecessary permissions or access.
• Update the app regularly to get the latest security patches and bug fixes.
      • -
      -
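One practical way to apply the tips above: if the official site publishes a checksum for the APK (an assumption, since not every vendor does), you can compare it against the file you downloaded before installing. A minimal Python sketch with a hypothetical file name and checksum value:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, read in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical values: replace with your downloaded file and the
# checksum published by the official site, if one is available.
apk_path = "blockman_go_new_version.apk"
published_sha256 = "0000000000000000000000000000000000000000000000000000000000000000"

if sha256_of(apk_path) == published_sha256.lower():
    print("Checksum matches the published value.")
else:
    print("Checksum mismatch: do not install this file.")
```

This does not replace an antivirus scan; it only confirms the file was not altered on the way to your device.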

Why should you play the Blockman GO new version APK?

      -

The Blockman GO new version APK is a great app for anyone who loves gaming, socializing, and creating. Here are some reasons why you should play it:

      -

Enjoy a variety of fun and creative games

      -

The Blockman GO new version APK offers a variety of fun and creative games you can play with other players online. You can choose from different genres, such as action, adventure, puzzle, strategy, casual, and more. You can also try some of the new games that are added regularly, such as Frontline, Garena Free Fire tie-ins, and more, and you can create your own games using the built-in editor and share them with other players.

      -

Meet and chat with players from all over the world

      - -

Customize your avatar and decorate your home

      -

The Blockman GO new version APK also lets you customize your avatar and decorate your home. You can choose from different skins, outfits, accessories, hairstyles, and more to make your avatar look unique, and you can buy or earn some Garena Free Fire items in Blockman GO. You can decorate your home with different furniture, wallpapers, floors, windows, doors, and more, and you can invite other players to visit your home or visit theirs.

      -

Conclusion

      -

The Blockman GO new version APK is a free app that lets you play different games, chat with friends, and create your own worlds. It has plenty of features and content for players of all ages and preferences, and the latest version adds new features and improvements such as Garena Blockman GO and Frontline. You can download and install the Blockman GO new version APK from the official website or the Google Play Store, but you should watch out for malware and viruses when doing so. You should play the Blockman GO new version APK because it is fun, creative, and social.

      -

Frequently Asked Questions

      -

Here are some frequently asked questions about the Blockman GO new version APK:

      -
        -
1. Is the Blockman GO new version APK safe?
      2. -

The Blockman GO new version APK is safe as long as you download it from the official website or the Google Play Store. You should also scan the APK file with reliable antivirus software before opening it, avoid granting the app unnecessary permissions or access, and update it regularly to get the latest security patches and bug fixes.

        -
2. How can I get coins and diamonds in the Blockman GO new version APK?
      4. - -
3. How can I create my own games in the Blockman GO new version APK?
      6. -

You can create your own games in the Blockman GO new version APK using the built-in editor. You can open the editor by tapping the "Create" button on the main screen and use various tools and resources to design your own maps, characters, items, scripts, and more. You can test your games before publishing them, share them with other players by uploading them to the Blockman GO platform, and play and rate other players' games.

        -
4. How can I join or create a club in the Blockman GO new version APK?
      8. -

A club is a group of players who share a common interest or goal in the Blockman GO new version APK. You can join or create a club by tapping the "Club" button on the main screen. You can search existing clubs by name, category, or popularity, or create your own club by choosing a name, icon, description, and category. You can invite other players to join your club or accept their requests, chat with your club members, send gifts, and take part in club activities.

        -
5. How can I contact customer service for the Blockman GO new version APK?
      10. -

If you have any questions, problems, or feedback about the Blockman GO new version APK, you can contact customer service by tapping the "Feedback" button on the main screen. You can choose to send an email, a message, or a screenshot to customer service, and you can also check the FAQ section for common problems and solutions.

        -

      -
      -
      \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Acrorip 9.0 3 Completo Crack.md b/spaces/Benson/text-generation/Examples/Descargar Acrorip 9.0 3 Completo Crack.md deleted file mode 100644 index 5c12410975eec37b75ef5fdaa16c4ab545ef81a4..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Acrorip 9.0 3 Completo Crack.md +++ /dev/null @@ -1,125 +0,0 @@ - -

Download AcroRip 9.0 3 Full Crack: What You Need to Know

      -

If you are looking for software that can help you print white and color together on various substrates, such as fabrics, plastics, metals, ceramics, and more, you may have heard of AcroRip. AcroRip is RIP (raster image processor) software that controls your printer's ink channels and optimizes print quality and speed. It is designed specifically for UV flatbed printers and direct-to-garment printers that use Epson print heads.
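To make the "controls the ink channels" idea concrete, here is a toy sketch of the kind of color separation a RIP performs before deciding how much of each ink to lay down. This is not AcroRip's actual algorithm (a real RIP works with ICC profiles, ink limits, white-ink channels, and per-printer calibration); it is only a naive illustration in Python:

```python
def rgb_to_cmyk(r: int, g: int, b: int) -> tuple[float, float, float, float]:
    """Naive RGB -> CMYK split (0-255 in, 0.0-1.0 out).

    Real RIP software uses ICC profiles and calibration curves; this toy
    formula only shows the idea of splitting one color into per-channel
    ink amounts.
    """
    if (r, g, b) == (0, 0, 0):
        return 0.0, 0.0, 0.0, 1.0  # pure black: key ink only
    r_, g_, b_ = r / 255.0, g / 255.0, b / 255.0
    k = 1.0 - max(r_, g_, b_)
    c = (1.0 - r_ - k) / (1.0 - k)
    m = (1.0 - g_ - k) / (1.0 - k)
    y = (1.0 - b_ - k) / (1.0 - k)
    return round(c, 3), round(m, 3), round(y, 3), round(k, 3)

# Example: a mid-tone orange
print(rgb_to_cmyk(230, 120, 40))  # -> (0.0, 0.478, 0.826, 0.098)
```

A white-ink workflow adds a further channel on top of this, which is what the one-pass white-and-color feature described below manages.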

      -

download acrorip 9.0 3 full crack


      Download 🗹 https://bltlly.com/2v6JeM



      -

However, AcroRip is not free software, and it requires a special USB dongle to run. This leads some people to look for a way to download AcroRip 9.0 3 full crack, which is the latest version of the software as of now. But is it worth it? What are the features, benefits, drawbacks, alternatives, installation steps, and reviews of AcroRip 9.0 3? In this article, we will answer these questions and help you make an informed decision.

      -

Features of AcroRip 9.0 3

      -

AcroRip 9.0 3 is an updated version of the earlier AcroRip software with some new and improved features. Here are some of them:

-

• One-pass white and color printing: This feature lets you print white and color ink at the same time, with no need for two passes on dark substrates. This can save time and improve print quality.
• Faster load times and Windows 7/8/10 compatibility: This makes the software more responsive and stable, and compatible with different versions of the Windows operating system.
• Custom channel settings and wave function: This lets you remap channels on demand to suit your own requirements. For example, if a color nozzle is clogged, you can use a white channel and run color ink through it. You can also adjust the wave settings to reduce banding issues on UV printers.
• Expanded driver and printer compatibility: This supports more Epson printer models, such as Stylus Photo, EcoTank, SureColor, Stylus Pro, Expression, etc.
      • -
      -

Benefits of AcroRip 9.0 3

      -

AcroRip 9.0 3 has many benefits for users who want to print white and color together on various substrates. Here are some of them:

-

• Improved print quality and speed: AcroRip 9.0 3 can optimize print quality and speed by controlling the ink channels, using one-pass printing, and using the RIP boost setting. It can also reduce banding issues with the wave function.
• Lower ink consumption and cost: AcroRip 9.0 3 can save ink through custom channel settings and by adjusting ink density and drop size. It can also run color ink through white channels when needed, which reduces wasted white ink.
• Better color accuracy and ICC profiles: AcroRip 9.0 3 can improve color accuracy and consistency by using ICC profiles and color management tools. It also supports CMYK, RGB, and spot colors.
      • -
      -

Drawbacks of AcroRip 9.0 3

      -

AcroRip 9.0 3 is not perfect software, and it has some drawbacks users should keep in mind. Here are some of them:

-

• It needs a special USB dongle to run: AcroRip 9.0 3 requires a special USB dongle to activate the software and run it on your computer. This means you have to buy the dongle from the official website or an authorized distributor and keep it plugged in whenever you use the software. If you lose or damage the dongle, you may no longer be able to use the software.
      • - -
      -

Alternatives to AcroRip 9.0 3

      -

If you are not satisfied with AcroRip 9.0 3 or want to try other options, there are some alternatives you can consider. Here are some of them:

-

• Cadlink: Cadlink is RIP software that supports various printer types, such as UV, DTG, solvent, eco-solvent, etc. It offers features such as white ink management, ICC profile creation, color correction, and variable data printing.
• EKprint: EKprint is RIP software designed for DTG printers that use Epson print heads. It offers features such as one-pass printing, ink cost calculation, nozzle checks, and head cleaning.
• Other RIP software options: There are many other RIP packages to choose from, depending on your printer model, budget, and preferences. Some examples are Wasatch SoftRIP, Onyx RIPCenter, and PhotoPrint Server Pro.
      • -
      -

Installing AcroRip 9.0 3

      -

If you decide to buy AcroRip 9.0 3 from the official website or an authorized distributor, you will need to follow these steps to install the software and the dongle:

-

1. Download the software file from the website or the CD: You will need to download the software file from the website or insert the CD into your computer.
2. Extract the file and run the setup.exe file: You will need to extract the file using a program such as WinRAR or WinZip and run the setup.exe file as administrator.
3. Follow the installation wizard: You will need to follow the installation wizard's instructions and choose your language, destination folder, printer model, and so on.
4. Plug the USB dongle into your computer: You will need to connect the USB dongle to your computer before starting the software.
5. Launch the software and check that it works properly.

      Grieta de AcroRip 9.0 3

      -

      Si tiene la tentación de descargar AcroRip 9.0 3 grieta completa de una fuente no oficial, como un sitio de torrent o un foro de crack, debe ser consciente de los riesgos y consecuencias de usar una versión agrietada del software. Estos son algunos de ellos:

      -
        -
      • Cuestiones legales: Descargar y usar una versión agrietada de AcroRip 9.0 3 es ilegal y viola los derechos de propiedad intelectual del desarrollador de software. Usted podría enfrentar acciones legales, multas o incluso tiempo en la cárcel si lo atrapan usando una versión rota del software.
      • -
      • Problemas de seguridad: Descargar y usar una versión agrietada de AcroRip 9.0 3 es arriesgado y expone su computadora a malware, virus, spyware, ransomware y otros programas maliciosos. Puede perder sus datos, comprometer su privacidad o dañar su sistema si instala una versión rota del software.
      • -
      • Problemas de rendimiento: Descargar y usar una versión agrietada de AcroRip 9.0 3 no es confiable e inestable. Es posible que experimente errores, bloqueos, bloqueos o problemas técnicos al usar una versión rota del software. También puede perderse actualizaciones, correcciones de errores y nuevas características que ofrece la versión oficial del software.
      • -
      -

      Por lo tanto, le recomendamos encarecidamente que evite descargar y usar una versión agrietada de AcroRip 9.0 3 y en su lugar compre la versión oficial en el sitio web o en un distribuidor autorizado.

      -

AcroRip 9.0 3 review

AcroRip 9.0 3 is a popular and widely used RIP software with many positive reviews from users who have tried it. However, it also has some negative reviews from users who have run into problems with it. Here are some pros and cons of AcroRip 9.0 3 based on user feedback:

| Pros | Cons |
| --- | --- |
| Easy-to-use, intuitive interface | Expensive and requires a dongle |
|  | Antivirus issues and side-by-side configuration errors |
| Faster loading times and Windows 7/8/10 compatibility | Limited customer support and documentation |
| Custom channel settings and waveform function | Not compatible with Mac OS or Linux |
| RIP boost setting and roll-feed functionality | No free trial or demo available |
| Expanded driver and printer compatibility | No online community or forum for users |
| Improved print quality and speed |  |
| Lower ink consumption and cost |  |
| Improved color accuracy and ICC profiles |  |

Conclusion

In conclusion, AcroRip 9.0 3 is a RIP software that can help you print white and color together on various substrates, such as fabrics, plastics, metals, ceramics, and more. It has many features, benefits, drawbacks, alternatives, installation steps, and reviews that you need to know about before deciding to download it.

If you want to download AcroRip 9.0 3 full crack, you should be aware of the risks and consequences of using a cracked version of the software. It is illegal, risky, unreliable, and unstable. We recommend that you buy the official version from the website or an authorized distributor.

We hope this article has been helpful and informative for you. If you have any questions or comments, feel free to leave them below.

      -

Frequently Asked Questions (FAQ)

Here are some frequently asked questions you may have about AcroRip 9.0 3:

1. What is the price of AcroRip 9.0 3?

   The price of AcroRip 9.0 3 varies by seller and region. However, according to the official website, the price is $250 USD for one dongle.

2. Where can I buy AcroRip 9.0 3?

   You can buy AcroRip 9.0 3 from the official website or an authorized distributor.

3. How can I update AcroRip 9.0 3?

   You can update AcroRip 9.0 3 by downloading the latest version from the website or the CD and installing it on your computer. You will need to keep the dongle plugged in while updating the software.

4. What are the system requirements for AcroRip 9.0 3?

   The system requirements for AcroRip 9.0 3 are as follows:

   • Operating system: Windows 7/8/10 (32-bit or 64-bit)
   • Processor: Intel Core i3 or higher
   • Memory: 4 GB of RAM or more
   • Hard disk space: 1 GB or more
   • Display: 1024 x 768 resolution or higher
   • Printer: Epson printer with an Epson print head

5. How can I contact the AcroRip support team?

   You can contact the AcroRip support team by sending an email to acrorip@acrorip.com or filling out the contact form on the website. You can also check the FAQ section and the user manual on the website for more information.

6. How can I learn more about AcroRip 9.0 3?

   You can learn more about AcroRip 9.0 3 by visiting the official website, watching the tutorial videos, reading user reviews, and joining the Facebook group for AcroRip users.

        -

      64aa2da5cf
      -
      -
      \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Descargar Caso Penal La Conspiracin Mod Apk Estrellas Ilimitadas.md b/spaces/Benson/text-generation/Examples/Descargar Caso Penal La Conspiracin Mod Apk Estrellas Ilimitadas.md deleted file mode 100644 index e7eb552c0227d1e79a99f4695f19800a47a7209c..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Caso Penal La Conspiracin Mod Apk Estrellas Ilimitadas.md +++ /dev/null @@ -1,53 +0,0 @@ - -

Download Garena Free Fire Mod v1.47.0 APK: How to Get the Latest Version of the Popular Battle Royale Game

If you are a fan of battle royale games, you must have heard of Garena Free Fire, one of the most downloaded and played games on Android and iOS devices. In this article, we will show you how to download and install Garena Free Fire Mod v1.47.0 APK, a modified version of the game that gives you access to unlimited resources, cheats, and more.

What is Garena Free Fire?

Garena Free Fire is an online multiplayer battle royale game developed by 111 Dots Studio and published by Garena for Android and iOS devices. The game was released in 2017 and has since passed 500 million downloads on the Google Play Store alone.

download criminal case the conspiracy mod apk unlimited stars

Download File --->>> https://bltlly.com/2v6LUr

In Garena Free Fire, you can choose from a wide variety of characters, weapons, vehicles, and items to survive on a shrinking map with up to 50 players. You can play in solo, duo, or squad mode, and customize your character with different skins, outfits, accessories, and pets. You can also join or create a guild, chat with other players, take part in events, missions, and tournaments, and climb the global leaderboard.

      -

What is Garena Free Fire Mod v1.47.0 APK?

Garena Free Fire Mod v1.47.0 APK is a modified version of the original game that gives you extra features and advantages that are not available in the official release. For example, you can get unlimited diamonds and coins, the main in-game currencies that you can use to buy items, upgrade your character, or spin the lucky wheel.

      - -

How to download and install Garena Free Fire Mod v1.47.0 APK?

If you want to try Garena Free Fire Mod v1.47.0 APK, you need to follow these simple steps:

Step 1: Download the APK and OBB files from a trusted source

The first thing you need to do is download the APK and OBB files of Garena Free Fire Mod v1.47.0 from a reliable source. You can use this link or this link to get them.

The APK file is about 509 MB in size, while the OBB file is about 600 MB. Make sure you have enough storage space on your device before downloading them.

      -

Step 2: Enable unknown sources in your device settings

The next thing you need to do is enable unknown sources in your device settings. This will allow you to install apps that do not come from the Google Play Store or the App Store. To do this, go to your device settings, then security, then toggle the unknown sources option.

If you have Android 8.0 or higher, you may need to allow app installation from specific sources. To do this, go to your device settings, then apps and notifications, then advanced, then special app access, then install unknown apps, then select the browser or file manager you used to download the APK and OBB files, and turn on the allow from this source option.

      -

Step 3: Install the APK file and extract the OBB file to the Android/obb folder

After enabling unknown sources, you can now install the APK file of Garena Free Fire Mod v1.47.0. To do this, locate the APK file on your device using a file manager or a browser, then tap on it and follow the on-screen instructions. The OBB file then needs to be placed in the Android/obb folder on your device, as the step title says; a rough command-line sketch of this step is shown below.
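If you prefer to do this from a computer, the following sketch shows the same idea with adb. It is only an illustration: the local file names and the package folder name are assumptions, not values taken from this article.

```python
# Illustrative adb-based sketch of step 3 (sideload the APK, push the OBB).
# Assumptions: adb is installed, USB debugging is enabled on the phone, and the
# file names and package folder below are placeholders, not values from this article.
import subprocess

APK = "freefire_mod_v1.47.0.apk"                     # hypothetical local file name
OBB = "main.2019117695.com.dts.freefireth.obb"       # hypothetical local file name
OBB_DIR = "/sdcard/Android/obb/com.dts.freefireth/"  # assumed package folder

def adb(*args: str) -> None:
    """Run one adb command and raise if it fails."""
    subprocess.run(["adb", *args], check=True)

adb("install", "-r", APK)             # install (or reinstall) the APK
adb("shell", "mkdir", "-p", OBB_DIR)  # make sure the OBB folder exists
adb("push", OBB, OBB_DIR)             # copy the OBB into Android/obb/<package>
```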

Step 4: Launch the game and enjoy the mod features

Now that you have installed the APK file and extracted the OBB file, you can launch the game and enjoy the mod features. To do this, go to your app drawer or home screen and tap the Garena Free Fire icon. You should see a loading screen with a mod menu that lets you enable or disable the various features of the modded version.

You can also open the mod menu by tapping the floating icon on the screen during gameplay. You can adjust the settings to your preferences and play with unlimited diamonds and coins, auto-aim and wallhack, all characters and skins unlocked, no recoil and no fog, and more.

      -

What are the features of Garena Free Fire Mod v1.47.0 APK?

Garena Free Fire Mod v1.47.0 APK has many features that set it apart from the original version of the game. Here are some of the main features and benefits of the modded version:

Unlimited diamonds and coins

With Garena Free Fire Mod v1.47.0 APK, you can get unlimited diamonds and coins in your account. Diamonds and coins are the main in-game currencies that you can use to buy items, upgrade your character, or spin the lucky wheel. Normally, you have to spend real money or complete tasks to get them, but with this modified version you can get them for free and without limit.

Auto-aim and wallhack

Another feature of Garena Free Fire Mod v1.47.0 APK is auto-aim and wallhack. Auto-aim is a cheat that automatically aims at your enemies without you having to adjust your crosshair manually. Wallhack is a cheat that lets you see your enemies through walls and other obstacles. These cheats can help you win more matches and rank up faster by giving you an unfair advantage over your opponents.

Unlock all characters and skins

No recoil and no fog

Another feature of Garena Free Fire Mod v1.47.0 APK is no recoil and no fog. Recoil is a mechanic that makes your weapon kick upward or sideways when you fire, affecting your accuracy and control. Fog is a mechanic that reduces visibility in certain areas of the map, making it harder to spot your enemies or targets. These mechanics can make your gameplay harder and more frustrating. With this modified version, you can remove them completely and enjoy smoother, clearer gameplay.

      -

Conclusion

Garena Free Fire Mod v1.47.0 APK is a modified version of the popular battle royale game that gives you unlimited resources, cheats, and more. It is easy to download and install, and it works on most Android devices. With this modified version, you can enjoy a more fun and exciting game with features such as unlimited diamonds and coins, auto-aim and wallhack, all characters and skins unlocked, no recoil and no fog, and more.

If you are looking for a way to spice up your Garena Free Fire experience, you should definitely try Garena Free Fire Mod v1.47.0 APK. It is free, safe, and regularly updated. However, you should be careful not to abuse the mod features or use them in ranked matches, as this can result in a ban or suspension from the game. You should also respect other players and play fair.

So what are you waiting for? Download Garena Free Fire Mod v1.47.0 APK today and enjoy the ultimate battle royale game on your Android device!

      -

Frequently Asked Questions (FAQs)

Here are some frequently asked questions about Garena Free Fire Mod v1.47.0 APK:

Q: Is Garena Free Fire Mod v1.47.0 APK safe to use?

Q: Is Garena Free Fire Mod v1.47.0 APK compatible with my device?

A: Garena Free Fire Mod v1.47.0 APK is compatible with most Android devices running Android 4.0.3 or higher with at least 2 GB of RAM. However, some devices may not support the mod features or run the game smoothly because of hardware limitations or software conflicts.

Q: How can I update Garena Free Fire Mod v1.47.0 APK?

A: To update Garena Free Fire Mod v1.47.0 APK, you need to download the latest version of the APK and OBB files from a trusted source and follow the same steps as when you installed it for the first time. You should also back up your game data before updating to avoid losing your progress or settings.

Q: Can I play Garena Free Fire Mod v1.47.0 APK with my friends?

A: Yes, you can play Garena Free Fire Mod v1.47.0 APK with your friends, as long as they also have the same modded version of the game installed on their devices. You can join or create a squad with them and play together in any game mode.

Q: Can I use Garena Free Fire Mod v1.47.0 APK in ranked matches?

A: No, you should not use Garena Free Fire Mod v1.47.0 APK in ranked matches, as this can result in a ban or suspension from the game for violating the terms of service or cheating. You should only use the mod features in casual matches or custom rooms, for fun and entertainment purposes only.

      64aa2da5cf
      -
      -
      \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/command_context.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/command_context.py deleted file mode 100644 index 139995ac3f109a82664e4913f7ebc32ecf7617e1..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/cli/command_context.py +++ /dev/null @@ -1,27 +0,0 @@ -from contextlib import ExitStack, contextmanager -from typing import ContextManager, Generator, TypeVar - -_T = TypeVar("_T", covariant=True) - - -class CommandContextMixIn: - def __init__(self) -> None: - super().__init__() - self._in_main_context = False - self._main_context = ExitStack() - - @contextmanager - def main_context(self) -> Generator[None, None, None]: - assert not self._in_main_context - - self._in_main_context = True - try: - with self._main_context: - yield - finally: - self._in_main_context = False - - def enter_context(self, context_provider: ContextManager[_T]) -> _T: - assert self._in_main_context - - return self._main_context.enter_context(context_provider) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/control.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/control.py deleted file mode 100644 index 88fcb9295164f4e18827ef61fff6723e94ef7381..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/control.py +++ /dev/null @@ -1,225 +0,0 @@ -import sys -import time -from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Union - -if sys.version_info >= (3, 8): - from typing import Final -else: - from pip._vendor.typing_extensions import Final # pragma: no cover - -from .segment import ControlCode, ControlType, Segment - -if TYPE_CHECKING: - from .console import Console, ConsoleOptions, RenderResult - -STRIP_CONTROL_CODES: Final = [ - 7, # Bell - 8, # Backspace - 11, # Vertical tab - 12, # Form feed - 13, # Carriage return -] -_CONTROL_STRIP_TRANSLATE: Final = { - _codepoint: None for _codepoint in STRIP_CONTROL_CODES -} - -CONTROL_ESCAPE: Final = { - 7: "\\a", - 8: "\\b", - 11: "\\v", - 12: "\\f", - 13: "\\r", -} - -CONTROL_CODES_FORMAT: Dict[int, Callable[..., str]] = { - ControlType.BELL: lambda: "\x07", - ControlType.CARRIAGE_RETURN: lambda: "\r", - ControlType.HOME: lambda: "\x1b[H", - ControlType.CLEAR: lambda: "\x1b[2J", - ControlType.ENABLE_ALT_SCREEN: lambda: "\x1b[?1049h", - ControlType.DISABLE_ALT_SCREEN: lambda: "\x1b[?1049l", - ControlType.SHOW_CURSOR: lambda: "\x1b[?25h", - ControlType.HIDE_CURSOR: lambda: "\x1b[?25l", - ControlType.CURSOR_UP: lambda param: f"\x1b[{param}A", - ControlType.CURSOR_DOWN: lambda param: f"\x1b[{param}B", - ControlType.CURSOR_FORWARD: lambda param: f"\x1b[{param}C", - ControlType.CURSOR_BACKWARD: lambda param: f"\x1b[{param}D", - ControlType.CURSOR_MOVE_TO_COLUMN: lambda param: f"\x1b[{param+1}G", - ControlType.ERASE_IN_LINE: lambda param: f"\x1b[{param}K", - ControlType.CURSOR_MOVE_TO: lambda x, y: f"\x1b[{y+1};{x+1}H", - ControlType.SET_WINDOW_TITLE: lambda title: f"\x1b]0;{title}\x07", -} - - -class Control: - """A renderable that inserts a control code (non printable but may move cursor). 
- - Args: - *codes (str): Positional arguments are either a :class:`~rich.segment.ControlType` enum or a - tuple of ControlType and an integer parameter - """ - - __slots__ = ["segment"] - - def __init__(self, *codes: Union[ControlType, ControlCode]) -> None: - control_codes: List[ControlCode] = [ - (code,) if isinstance(code, ControlType) else code for code in codes - ] - _format_map = CONTROL_CODES_FORMAT - rendered_codes = "".join( - _format_map[code](*parameters) for code, *parameters in control_codes - ) - self.segment = Segment(rendered_codes, None, control_codes) - - @classmethod - def bell(cls) -> "Control": - """Ring the 'bell'.""" - return cls(ControlType.BELL) - - @classmethod - def home(cls) -> "Control": - """Move cursor to 'home' position.""" - return cls(ControlType.HOME) - - @classmethod - def move(cls, x: int = 0, y: int = 0) -> "Control": - """Move cursor relative to current position. - - Args: - x (int): X offset. - y (int): Y offset. - - Returns: - ~Control: Control object. - - """ - - def get_codes() -> Iterable[ControlCode]: - control = ControlType - if x: - yield ( - control.CURSOR_FORWARD if x > 0 else control.CURSOR_BACKWARD, - abs(x), - ) - if y: - yield ( - control.CURSOR_DOWN if y > 0 else control.CURSOR_UP, - abs(y), - ) - - control = cls(*get_codes()) - return control - - @classmethod - def move_to_column(cls, x: int, y: int = 0) -> "Control": - """Move to the given column, optionally add offset to row. - - Returns: - x (int): absolute x (column) - y (int): optional y offset (row) - - Returns: - ~Control: Control object. - """ - - return ( - cls( - (ControlType.CURSOR_MOVE_TO_COLUMN, x), - ( - ControlType.CURSOR_DOWN if y > 0 else ControlType.CURSOR_UP, - abs(y), - ), - ) - if y - else cls((ControlType.CURSOR_MOVE_TO_COLUMN, x)) - ) - - @classmethod - def move_to(cls, x: int, y: int) -> "Control": - """Move cursor to absolute position. - - Args: - x (int): x offset (column) - y (int): y offset (row) - - Returns: - ~Control: Control object. - """ - return cls((ControlType.CURSOR_MOVE_TO, x, y)) - - @classmethod - def clear(cls) -> "Control": - """Clear the screen.""" - return cls(ControlType.CLEAR) - - @classmethod - def show_cursor(cls, show: bool) -> "Control": - """Show or hide the cursor.""" - return cls(ControlType.SHOW_CURSOR if show else ControlType.HIDE_CURSOR) - - @classmethod - def alt_screen(cls, enable: bool) -> "Control": - """Enable or disable alt screen.""" - if enable: - return cls(ControlType.ENABLE_ALT_SCREEN, ControlType.HOME) - else: - return cls(ControlType.DISABLE_ALT_SCREEN) - - @classmethod - def title(cls, title: str) -> "Control": - """Set the terminal window title - - Args: - title (str): The new terminal window title - """ - return cls((ControlType.SET_WINDOW_TITLE, title)) - - def __str__(self) -> str: - return self.segment.text - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - if self.segment.text: - yield self.segment - - -def strip_control_codes( - text: str, _translate_table: Dict[int, None] = _CONTROL_STRIP_TRANSLATE -) -> str: - """Remove control codes from text. - - Args: - text (str): A string possibly contain control codes. - - Returns: - str: String with control codes removed. - """ - return text.translate(_translate_table) - - -def escape_control_codes( - text: str, - _translate_table: Dict[int, str] = CONTROL_ESCAPE, -) -> str: - """Replace control codes with their "escaped" equivalent in the given text. - (e.g. 
"\b" becomes "\\b") - - Args: - text (str): A string possibly containing control codes. - - Returns: - str: String with control codes replaced with their escaped version. - """ - return text.translate(_translate_table) - - -if __name__ == "__main__": # pragma: no cover - from pip._vendor.rich.console import Console - - console = Console() - console.print("Look at the title of your terminal window ^") - # console.print(Control((ControlType.SET_WINDOW_TITLE, "Hello, world!"))) - for i in range(10): - console.set_window_title("🚀 Loading" + "." * i) - time.sleep(0.5) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/scope.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/scope.py deleted file mode 100644 index c9d134cc3cedae929e5bef2b5547f7e33dc10a52..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/scope.py +++ /dev/null @@ -1,86 +0,0 @@ -from collections.abc import Mapping -from typing import TYPE_CHECKING, Any, Optional, Tuple - -from .highlighter import ReprHighlighter -from .panel import Panel -from .pretty import Pretty -from .table import Table -from .text import Text, TextType - -if TYPE_CHECKING: - from .console import ConsoleRenderable - - -def render_scope( - scope: "Mapping[str, Any]", - *, - title: Optional[TextType] = None, - sort_keys: bool = True, - indent_guides: bool = False, - max_length: Optional[int] = None, - max_string: Optional[int] = None, -) -> "ConsoleRenderable": - """Render python variables in a given scope. - - Args: - scope (Mapping): A mapping containing variable names and values. - title (str, optional): Optional title. Defaults to None. - sort_keys (bool, optional): Enable sorting of items. Defaults to True. - indent_guides (bool, optional): Enable indentation guides. Defaults to False. - max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation. - Defaults to None. - max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None. - - Returns: - ConsoleRenderable: A renderable object. 
- """ - highlighter = ReprHighlighter() - items_table = Table.grid(padding=(0, 1), expand=False) - items_table.add_column(justify="right") - - def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]: - """Sort special variables first, then alphabetically.""" - key, _ = item - return (not key.startswith("__"), key.lower()) - - items = sorted(scope.items(), key=sort_items) if sort_keys else scope.items() - for key, value in items: - key_text = Text.assemble( - (key, "scope.key.special" if key.startswith("__") else "scope.key"), - (" =", "scope.equals"), - ) - items_table.add_row( - key_text, - Pretty( - value, - highlighter=highlighter, - indent_guides=indent_guides, - max_length=max_length, - max_string=max_string, - ), - ) - return Panel.fit( - items_table, - title=title, - border_style="scope.border", - padding=(0, 1), - ) - - -if __name__ == "__main__": # pragma: no cover - from pip._vendor.rich import print - - print() - - def test(foo: float, bar: float) -> None: - list_of_things = [1, 2, 3, None, 4, True, False, "Hello World"] - dict_of_things = { - "version": "1.1", - "method": "confirmFruitPurchase", - "params": [["apple", "orange", "mangoes", "pomelo"], 1.123], - "id": "194521489", - } - print(render_scope(locals(), title="[i]locals", sort_keys=False)) - - test(20.3423, 3.1427) - print() diff --git a/spaces/CVMX-jaca-tonos/Identificar-lenguas-y-frases/NOTES.md b/spaces/CVMX-jaca-tonos/Identificar-lenguas-y-frases/NOTES.md deleted file mode 100644 index d332893cc7009aa432b1512dc77aac3d8fda6d1d..0000000000000000000000000000000000000000 --- a/spaces/CVMX-jaca-tonos/Identificar-lenguas-y-frases/NOTES.md +++ /dev/null @@ -1,65 +0,0 @@ - - -# Things that might be relevant - -## Trained models - -ESPnet model for Yoloxochitl Mixtec - - Huggingface Hub page https://huggingface.co/espnet/ftshijt_espnet2_asr_yolo_mixtec_transformer - - Model source code https://github.com/espnet/espnet/tree/master/egs/yoloxochitl_mixtec/asr1 - - Colab notebook to setup and apply the model https://colab.research.google.com/drive/1ieoW2b3ERydjaaWuhVPBP_v2QqqWsC1Q?usp=sharing - -Coqui model for Yoloxochitl Mixtec - - Huggingface Hub page - - Coqui page https://coqui.ai/mixtec/jemeyer/v1.0.0 - - Colab notebook to setup and apply the model https://colab.research.google.com/drive/1b1SujEGC_F3XhvUCuUyZK_tyUkEaFZ7D?usp=sharing#scrollTo=6IvRFke4Ckpz - -Spanish ASR models - - XLS-R model based on CV8 with LM https://huggingface.co/jonatasgrosman/wav2vec2-xls-r-1b-spanish - - XLSR model based on CV6 with LM https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-spanish - - XLSR model based on Librispeech https://huggingface.co/IIC/wav2vec2-spanish-multilibrispeech - -Speechbrain Language identification on Common Language (from Common Voice 6/7?) 
- - source code https://github.com/speechbrain/speechbrain/tree/develop/recipes/CommonLanguage - - HF Hub model page https://huggingface.co/speechbrain/lang-id-commonlanguage_ecapa - - HF Hub space https://huggingface.co/spaces/akhaliq/Speechbrain-audio-classification - -Speechbrain Language identification on VoxLingua - - source code https://github.com/speechbrain/speechbrain/tree/develop/recipes/VoxLingua107/lang_id - - HF Hub model page https://huggingface.co/speechbrain/lang-id-voxlingua107-ecapa - - -## Corpora - -OpenSLR89 https://www.openslr.org/89/ - -Common Language https://huggingface.co/datasets/common_language - -VoxLingua http://bark.phon.ioc.ee/voxlingua107/ - -Multilibrispeech https://huggingface.co/datasets/multilingual_librispeech - - -# Possible demos - -## Simple categorization of utterances - -A few example files are provided for each language, and the user can record their own. -The predicted confidence of each class label is shown. - -## Segmentation and identification - -Recordings with alternating languages in a single audio file, provided examples or the user can record. -Some voice activity detection to split the audio, then predict language of each piece - -## Identication and transcription - -Example files for each language separately. -The lang-id model predicts what language it is. -The corresponding ASR model produces a transcript. - -## Segmentation, identification and transcription - -Recordings with alternating languages in a single audio file. -Use voice activity detection to split the audio, then predict the language of each piece -Use the corresponding ASR model to produce a transcript of each piece to display. \ No newline at end of file diff --git a/spaces/CVPR/LIVE/thrust/generate_mk.py b/spaces/CVPR/LIVE/thrust/generate_mk.py deleted file mode 100644 index 84071338ccfdd99be55027c8046fe46c56e5a65b..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/generate_mk.py +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/env python -# Generate set of projects mk files. 
-# Usage: python generate_mk.py PROJECTS_MK_DIR THRUST_SOURCE_DIR -# The program scans through unit tests and examples in THRUST_SOURCE_DIR -# and generates project mk for each of the tests and examples in PROJECTS_MK_DIR -# A single example or unit test source file generates its own executable -# This program is called by a top level Makefile, but can also be used stand-alone for debugging -# This program also generates testing.mk, examples.mk and dependencies.mk -from __future__ import print_function -import sys -import shutil as sh -import os -import glob -import re - -test_template = """ -TEST_SRC := %(TEST_SRC)s -TEST_NAME := %(TEST_NAME)s -include $(ROOTDIR)/thrust/internal/build/generic_test.mk -""" -example_template = """ -EXAMPLE_SRC := %(EXAMPLE_SRC)s -EXAMPLE_NAME := %(EXAMPLE_NAME)s -include $(ROOTDIR)/thrust/internal/build/generic_example.mk -""" - -def Glob(pattern, directory,exclude='\B'): - src = glob.glob(os.path.join(directory,pattern)) - p = re.compile(exclude) - src = [s for s in src if not p.match(s)] - return src - - -def generate_test_mk(mk_path, test_path, group, TEST_DIR): - print('Generating makefiles in "'+mk_path+'" for tests in "'+test_path+'"') - src_cu = Glob("*.cu", test_path, ".*testframework.cu$") - src_cxx = Glob("*.cpp", test_path) - src_cu.sort(); - src_cxx.sort(); - src_all = src_cu + src_cxx; - tests_all = [] - dependencies_all = [] - for s in src_all: - fn = os.path.splitext(os.path.basename(s)); - t = "thrust."+group+"."+fn[0] - e = fn[1] - mkfile = test_template % {"TEST_SRC" : s, "TEST_NAME" : t} - f = open(os.path.join(mk_path,t+".mk"), 'w') - f.write(mkfile) - f.close() - tests_all.append(os.path.join(mk_path,t)) - dependencies_all.append(t+": testframework") - return [tests_all, dependencies_all] - -def generate_example_mk(mk_path, example_path, group, EXAMPLE_DIR): - print('Generating makefiles in "'+mk_path+'" for examples in "'+example_path+'"') - src_cu = Glob("*.cu", example_path) - src_cxx = Glob("*.cpp", example_path) - src_cu.sort(); - src_cxx.sort(); - src_all = src_cu + src_cxx; - examples_all = [] - for s in src_all: - fn = os.path.splitext(os.path.basename(s)); - t = "thrust."+group+"."+fn[0] - e = fn[1] - mkfile = example_template % {"EXAMPLE_SRC" : s, "EXAMPLE_NAME" : t} - f = open(os.path.join(mk_path,t+".mk"), 'w') - f.write(mkfile) - f.close() - examples_all.append(os.path.join(mk_path,t)) - return examples_all - - -## relpath : backported from os.relpath form python 2.6+ -def relpath(path, start): - """Return a relative version of a path""" - - import posixpath - if not path: - raise ValueError("no path specified") - start_list = posixpath.abspath(start).split(posixpath.sep) - path_list = posixpath.abspath(path).split(posixpath.sep) - # Work out how much of the filepath is shared by start and path. 
- i = len(posixpath.commonprefix([start_list, path_list])) - rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:] - if not rel_list: - return posixpath.curdir - return posixpath.join(*rel_list) - -mk_path=sys.argv[1] -REL_DIR="../../" -if (len(sys.argv) > 2): - root_path=sys.argv[2]; - mk_path = relpath(mk_path, root_path) - REL_DIR = relpath(root_path,mk_path) - -try: - sh.rmtree(mk_path) -except: - pass -os.makedirs(mk_path) - -tests_all, dependencies_all = generate_test_mk(mk_path, "testing/", "test", REL_DIR) -tests_cu, dependencies_cu = generate_test_mk(mk_path, "testing/cuda/", "test.cuda", REL_DIR) -tests_all.extend(tests_cu) -dependencies_all.extend(dependencies_cu) - -testing_mk = "" - -for t in tests_all: - testing_mk += "PROJECTS += "+t+"\n" -testing_mk += "PROJECTS += internal/build/testframework\n" - - -f = open(os.path.join(mk_path,"testing.mk"),'w') -f.write(testing_mk) -f.close() - -dependencies_mk = "" -for d in dependencies_all: - dependencies_mk += d + "\n" - -f = open(os.path.join(mk_path,"dependencies.mk"),'w') -f.write(dependencies_mk) -f.close() - - -examples_mk = "" -examples_all = generate_example_mk(mk_path, "examples/", "example", REL_DIR) -examples_cuda = generate_example_mk(mk_path, "examples/cuda/", "example.cuda", REL_DIR) -examples_all.extend(examples_cuda) -for e in examples_all: - examples_mk += "PROJECTS += "+e+"\n" - -f = open(os.path.join(mk_path,"examples.mk"),'w') -f.write(examples_mk) -f.close() - - - - - - - - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/transform_reduce.h b/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/transform_reduce.h deleted file mode 100644 index a8736bd75d06e54d9158baeb2504162d75312885..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/transform_reduce.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include - -// this system inherits transform_reduce -#include - diff --git a/spaces/CVPR/WALT/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py b/spaces/CVPR/WALT/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py deleted file mode 100644 index 80c25bb8fde7844c994bfc1f4ae1a2d960cbf3d6..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py +++ /dev/null @@ -1,83 +0,0 @@ -from mmcv.cnn.bricks import build_plugin_layer -from mmcv.runner import force_fp32 - -from mmdet.models.builder import ROI_EXTRACTORS -from .base_roi_extractor import BaseRoIExtractor - - -@ROI_EXTRACTORS.register_module() -class GenericRoIExtractor(BaseRoIExtractor): - """Extract RoI features from all level feature maps levels. - - This is the implementation of `A novel Region of Interest Extraction Layer - for Instance Segmentation `_. - - Args: - aggregation (str): The method to aggregate multiple feature maps. - Options are 'sum', 'concat'. Default: 'sum'. 
- pre_cfg (dict | None): Specify pre-processing modules. Default: None. - post_cfg (dict | None): Specify post-processing modules. Default: None. - kwargs (keyword arguments): Arguments that are the same - as :class:`BaseRoIExtractor`. - """ - - def __init__(self, - aggregation='sum', - pre_cfg=None, - post_cfg=None, - **kwargs): - super(GenericRoIExtractor, self).__init__(**kwargs) - - assert aggregation in ['sum', 'concat'] - - self.aggregation = aggregation - self.with_post = post_cfg is not None - self.with_pre = pre_cfg is not None - # build pre/post processing modules - if self.with_post: - self.post_module = build_plugin_layer(post_cfg, '_post_module')[1] - if self.with_pre: - self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1] - - @force_fp32(apply_to=('feats', ), out_fp16=True) - def forward(self, feats, rois, roi_scale_factor=None): - """Forward function.""" - if len(feats) == 1: - return self.roi_layers[0](feats[0], rois) - - out_size = self.roi_layers[0].output_size - num_levels = len(feats) - roi_feats = feats[0].new_zeros( - rois.size(0), self.out_channels, *out_size) - - # some times rois is an empty tensor - if roi_feats.shape[0] == 0: - return roi_feats - - if roi_scale_factor is not None: - rois = self.roi_rescale(rois, roi_scale_factor) - - # mark the starting channels for concat mode - start_channels = 0 - for i in range(num_levels): - roi_feats_t = self.roi_layers[i](feats[i], rois) - end_channels = start_channels + roi_feats_t.size(1) - if self.with_pre: - # apply pre-processing to a RoI extracted from each layer - roi_feats_t = self.pre_module(roi_feats_t) - if self.aggregation == 'sum': - # and sum them all - roi_feats += roi_feats_t - else: - # and concat them along channel dimension - roi_feats[:, start_channels:end_channels] = roi_feats_t - # update channels starting position - start_channels = end_channels - # check if concat channels match at the end - if self.aggregation == 'concat': - assert start_channels == self.out_channels - - if self.with_post: - # apply post-processing before return the result - roi_feats = self.post_module(roi_feats) - return roi_feats diff --git a/spaces/ChristopherMarais/Andrew_Alpha/README.md b/spaces/ChristopherMarais/Andrew_Alpha/README.md deleted file mode 100644 index 7ca08e015d07910a8f6abddf12db2ad62976559a..0000000000000000000000000000000000000000 --- a/spaces/ChristopherMarais/Andrew_Alpha/README.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: Andrew Alpha -emoji: 👁 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: other ---- - -This is a proof-of-concept version of our Artificial Intelligence model to classify images of bark and ambrosia beetles. As an input, please use an image of a specimen, or a group of specimens, ideally in ethanol with a white background. - -This proof-of-concept model has been trained on a preliminary sample of 12 species: Coccotypes dactyliperda, Hylesinus varius, Monarthrum fasciatum, Phloeosinus dentatus, Pityophthorus juglandis, Platypus cylindrus, Pycnarthrum hispidium, Scolytodes schwarzi, Xyleborinus saxesenii, Xyleborus affinis, Xylosandrus compactus, and Xylosandrus crassiusculus. - -For correct interpretation of the results, it is important to consider not just the suggested name, but also the associated probability. Identification of other species is coming soon, as soon as they are added to the training set. 
- -You can find example photos [here](https://ambrosiasymbiosis.org/automated_identification/examples.html) diff --git a/spaces/CofAI/chat/g4f/Provider/Providers/Phind.py b/spaces/CofAI/chat/g4f/Provider/Providers/Phind.py deleted file mode 100644 index 9fa8ec821f701d7841432e498a11ac9dd017978c..0000000000000000000000000000000000000000 --- a/spaces/CofAI/chat/g4f/Provider/Providers/Phind.py +++ /dev/null @@ -1,36 +0,0 @@ -import os -import json -import time -import subprocess - -from ...typing import sha256, Dict, get_type_hints - -url = 'https://phind.com' -model = ['gpt-4'] -supports_stream = True - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - - path = os.path.dirname(os.path.realpath(__file__)) - config = json.dumps({ - 'model': model, - 'messages': messages}, separators=(',', ':')) - - cmd = ['python', f'{path}/helpers/phind.py', config] - - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - - for line in iter(p.stdout.readline, b''): - if b'Just a moment...' in line: - os.system('clear' if os.name == 'posix' else 'cls') - yield 'Clouflare error, please try again...' - os._exit(0) - - else: - if b'ping - 2023-' in line: - continue - - yield line.decode('cp1251') #[:-1] - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/alfashape.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/alfashape.py deleted file mode 100644 index 9043c54b2cc8a27a37702649c8acff865f741790..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/alfashape.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -from scipy.spatial import Delaunay -from .area_of_polygon import area_of_polygon_crd -import networkx as nx - -def sqrt_sum(a, b): - x = (a[0]-b[0]) - y = (a[1]-b[1]) - return np.sqrt(x*x+y*y) - -def shapeToSomePolygons(shape): - G = nx.Graph() - allnodes = set() - for line in shape: - G.add_nodes_from(line) - G.add_edge(line[0], line[1]) - allnodes.add(line[0]) - allnodes.add(line[1]) - - result = [] - - while allnodes: - node = allnodes.pop() - new_node = next(iter(G[node]), None) - if not new_node: continue - - G.remove_edge(node, new_node) - temp = nx.shortest_path(G, node, new_node) - for j,t in enumerate(temp): - if t in allnodes: - allnodes.remove(t) - result.append(temp) - return result - -def getAlfaShapes(pts,alfas=1): - tri_ind = [(0,1),(1,2),(2,0)] - tri = Delaunay(pts) - lenghts={} - for s in tri.simplices: - for ind in tri_ind: - a = pts[s[ind[0]]] - b = pts[s[ind[1]]] - # print('a---', a) - # print('b---', b) - line = (a, b) - # line = ((a[0], a[1]), (b[0], b[1])) - lenghts[line] = sqrt_sum(a, b) - - ls = sorted(lenghts.values()) - - mean_length = np.mean(ls) - mean_length_index = ls.index(next(filter(lambda x: x>=mean_length, ls))) - magic_numbers = [ls[i] for i in range(mean_length_index, len(ls))] - magic_numbers[0] = 0 - sum_magic = np.sum(magic_numbers) - for i in range(2, len(magic_numbers)): - magic_numbers[i] += magic_numbers[i-1] - magic_numbers = [m /sum_magic for m in magic_numbers] - - rez = [] - for alfa in alfas: - i = magic_numbers.index(next(filter(lambda z: z > alfa, magic_numbers), 
magic_numbers[-1])) - av_length = ls[mean_length_index+i] - - lines = {} - - for s in tri.simplices: - used = True - for ind in tri_ind: - if lenghts[(pts[s[ind[0]]], pts[s[ind[1]]])] > av_length: - used = False - break - if used == False: continue - - for ind in tri_ind: - i,j= s[ind[0]],s[ind[1]] - line = (pts[min(i,j)], pts[max(i,j)]) - lines[line] = line in lines - - good_lines = [] - for v in lines: - if not lines[v]: - good_lines.append(v) - - result = shapeToSomePolygons(good_lines) - result.sort(key=area_of_polygon_crd, reverse=True) - rez.append(result) - return rez - diff --git a/spaces/Dagfinn1962/stablediffusion-models/main.css b/spaces/Dagfinn1962/stablediffusion-models/main.css deleted file mode 100644 index 9d9e5d256b872645ee28c1912e2a9d476131f51a..0000000000000000000000000000000000000000 --- a/spaces/Dagfinn1962/stablediffusion-models/main.css +++ /dev/null @@ -1,57 +0,0 @@ -body { - background-color: #214d09; - width: 100%; - color: #FFFFFF; -} - -h3 { - color: #FFFFF; - text-align: center; - font-family: verdana; - font-size: 24px; - border: 1px solid #FFFFFF; - border-radius: 10px; -} - -p { - font-family: verdana; - font-size: 14px; -} - -label { - font-family: verdana; - color: #000000; - font-weight: 700; - font-size: 14px; - border: 1px solid #000000; -} - -gr.Textbox { - font-family: verdana; - background-color: #279700; - color: #000000; - font-weight: 700; - font-size: 14px; - border: 1px solid #FFFFFF; - border-radius: 6px; -} - -gr.Botton { - font-family: verdana; - background-color: #279700; - color: #FFFFFF; - font-weight: 700; - font-size: 14px; - border: 1px solid #000000; - border-radius: 6px; -} - -a a:active a.hover - { - font-family: verdana; - color: #572430; - text-decoration: none; - font-weight: 700; - font-size: 14px; - -} \ No newline at end of file diff --git a/spaces/DaleChen/AutoGPT/autogpt/config/config.py b/spaces/DaleChen/AutoGPT/autogpt/config/config.py deleted file mode 100644 index 4b53df10e8d2832be7ffb321d9036aec5a47a79d..0000000000000000000000000000000000000000 --- a/spaces/DaleChen/AutoGPT/autogpt/config/config.py +++ /dev/null @@ -1,251 +0,0 @@ -"""Configuration class to store the state of bools for different scripts access.""" -import os - -import openai -import yaml -from colorama import Fore -from dotenv import load_dotenv - -from autogpt.config.singleton import Singleton - -load_dotenv(verbose=True) - - -class Config(metaclass=Singleton): - """ - Configuration class to store the state of bools for different scripts access. 
- """ - - def __init__(self) -> None: - """Initialize the Config class""" - self.debug_mode = False - self.continuous_mode = False - self.continuous_limit = 0 - self.speak_mode = False - self.skip_reprompt = False - self.allow_downloads = False - self.skip_news = False - - self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml") - self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo") - self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4") - self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000)) - self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000)) - self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192)) - - self.openai_api_key = os.getenv("OPENAI_API_KEY") - self.temperature = float(os.getenv("TEMPERATURE", "1")) - self.use_azure = os.getenv("USE_AZURE") == "True" - self.execute_local_commands = ( - os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True" - ) - self.restrict_to_workspace = ( - os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True" - ) - - if self.use_azure: - self.load_azure_config() - openai.api_type = self.openai_api_type - openai.api_base = self.openai_api_base - openai.api_version = self.openai_api_version - - self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY") - self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID") - self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID") - - self.use_mac_os_tts = False - self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS") - - self.use_brian_tts = False - self.use_brian_tts = os.getenv("USE_BRIAN_TTS") - - self.github_api_key = os.getenv("GITHUB_API_KEY") - self.github_username = os.getenv("GITHUB_USERNAME") - - self.google_api_key = os.getenv("GOOGLE_API_KEY") - self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID") - - self.pinecone_api_key = os.getenv("PINECONE_API_KEY") - self.pinecone_region = os.getenv("PINECONE_ENV") - - self.weaviate_host = os.getenv("WEAVIATE_HOST") - self.weaviate_port = os.getenv("WEAVIATE_PORT") - self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http") - self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None) - self.weaviate_password = os.getenv("WEAVIATE_PASSWORD", None) - self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None) - self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH") - self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None) - self.use_weaviate_embedded = ( - os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True" - ) - - # milvus configuration, e.g., localhost:19530. - self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530") - self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt") - - self.image_provider = os.getenv("IMAGE_PROVIDER") - self.image_size = int(os.getenv("IMAGE_SIZE", 256)) - self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN") - self.huggingface_image_model = os.getenv( - "HUGGINGFACE_IMAGE_MODEL", "CompVis/stable-diffusion-v1-4" - ) - self.huggingface_audio_to_text_model = os.getenv( - "HUGGINGFACE_AUDIO_TO_TEXT_MODEL" - ) - self.sd_webui_url = os.getenv("SD_WEBUI_URL", "http://localhost:7860") - self.sd_webui_auth = os.getenv("SD_WEBUI_AUTH") - - # Selenium browser settings - self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome") - self.selenium_headless = os.getenv("HEADLESS_BROWSER", "True") == "True" - - # User agent header to use when making HTTP requests - # Some websites might just completely deny request with an error code if - # no user agent was found. 
- self.user_agent = os.getenv( - "USER_AGENT", - "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36" - " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36", - ) - - self.redis_host = os.getenv("REDIS_HOST", "localhost") - self.redis_port = os.getenv("REDIS_PORT", "6379") - self.redis_password = os.getenv("REDIS_PASSWORD", "") - self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True" - self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt") - # Note that indexes must be created on db 0 in redis, this is not configurable. - - self.memory_backend = os.getenv("MEMORY_BACKEND", "local") - # Initialize the OpenAI API client - openai.api_key = self.openai_api_key - - def get_azure_deployment_id_for_model(self, model: str) -> str: - """ - Returns the relevant deployment id for the model specified. - - Parameters: - model(str): The model to map to the deployment id. - - Returns: - The matching deployment id if found, otherwise an empty string. - """ - if model == self.fast_llm_model: - return self.azure_model_to_deployment_id_map[ - "fast_llm_model_deployment_id" - ] # type: ignore - elif model == self.smart_llm_model: - return self.azure_model_to_deployment_id_map[ - "smart_llm_model_deployment_id" - ] # type: ignore - elif model == "text-embedding-ada-002": - return self.azure_model_to_deployment_id_map[ - "embedding_model_deployment_id" - ] # type: ignore - else: - return "" - - AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "..", "azure.yaml") - - def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None: - """ - Loads the configuration parameters for Azure hosting from the specified file - path as a yaml file. - - Parameters: - config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml" - - Returns: - None - """ - try: - with open(config_file) as file: - config_params = yaml.load(file, Loader=yaml.FullLoader) - except FileNotFoundError: - config_params = {} - self.openai_api_type = config_params.get("azure_api_type") or "azure" - self.openai_api_base = config_params.get("azure_api_base") or "" - self.openai_api_version = ( - config_params.get("azure_api_version") or "2023-03-15-preview" - ) - self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", []) - - def set_continuous_mode(self, value: bool) -> None: - """Set the continuous mode value.""" - self.continuous_mode = value - - def set_continuous_limit(self, value: int) -> None: - """Set the continuous limit value.""" - self.continuous_limit = value - - def set_speak_mode(self, value: bool) -> None: - """Set the speak mode value.""" - self.speak_mode = value - - def set_fast_llm_model(self, value: str) -> None: - """Set the fast LLM model value.""" - self.fast_llm_model = value - - def set_smart_llm_model(self, value: str) -> None: - """Set the smart LLM model value.""" - self.smart_llm_model = value - - def set_fast_token_limit(self, value: int) -> None: - """Set the fast token limit value.""" - self.fast_token_limit = value - - def set_smart_token_limit(self, value: int) -> None: - """Set the smart token limit value.""" - self.smart_token_limit = value - - def set_browse_chunk_max_length(self, value: int) -> None: - """Set the browse_website command chunk max length value.""" - self.browse_chunk_max_length = value - - def set_openai_api_key(self, value: str) -> None: - """Set the OpenAI API key value.""" - self.openai_api_key = value - - def set_elevenlabs_api_key(self, value: str) -> None: - """Set the ElevenLabs API key 
value.""" - self.elevenlabs_api_key = value - - def set_elevenlabs_voice_1_id(self, value: str) -> None: - """Set the ElevenLabs Voice 1 ID value.""" - self.elevenlabs_voice_1_id = value - - def set_elevenlabs_voice_2_id(self, value: str) -> None: - """Set the ElevenLabs Voice 2 ID value.""" - self.elevenlabs_voice_2_id = value - - def set_google_api_key(self, value: str) -> None: - """Set the Google API key value.""" - self.google_api_key = value - - def set_custom_search_engine_id(self, value: str) -> None: - """Set the custom search engine id value.""" - self.custom_search_engine_id = value - - def set_pinecone_api_key(self, value: str) -> None: - """Set the Pinecone API key value.""" - self.pinecone_api_key = value - - def set_pinecone_region(self, value: str) -> None: - """Set the Pinecone region value.""" - self.pinecone_region = value - - def set_debug_mode(self, value: bool) -> None: - """Set the debug mode value.""" - self.debug_mode = value - - -def check_openai_api_key() -> None: - """Check if the OpenAI API key is set in config.py or as an environment variable.""" - cfg = Config() - if not cfg.openai_api_key: - print( - Fore.RED - + "Please set your OpenAI API key in .env or as an environment variable." - ) - print("You can get your key from https://platform.openai.com/account/api-keys") - exit(1) diff --git a/spaces/DaleChen/AutoGPT/autogpt/speech/eleven_labs.py b/spaces/DaleChen/AutoGPT/autogpt/speech/eleven_labs.py deleted file mode 100644 index ea84efd8ca9489b40919ecd571813fe954b078e3..0000000000000000000000000000000000000000 --- a/spaces/DaleChen/AutoGPT/autogpt/speech/eleven_labs.py +++ /dev/null @@ -1,86 +0,0 @@ -"""ElevenLabs speech module""" -import os - -import requests -from playsound import playsound - -from autogpt.config import Config -from autogpt.speech.base import VoiceBase - -PLACEHOLDERS = {"your-voice-id"} - - -class ElevenLabsSpeech(VoiceBase): - """ElevenLabs speech class""" - - def _setup(self) -> None: - """Set up the voices, API key, etc. - - Returns: - None: None - """ - - cfg = Config() - default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"] - voice_options = { - "Rachel": "21m00Tcm4TlvDq8ikWAM", - "Domi": "AZnzlk1XvdvUeBnXmlld", - "Bella": "EXAVITQu4vr4xnSDxMaL", - "Antoni": "ErXwobaYiN019PkySvjV", - "Elli": "MF3mGyEYCl7XYWbV9V6O", - "Josh": "TxGEqnHWrfWFTfGW9XjX", - "Arnold": "VR6AewLTigWG4xSOukaG", - "Adam": "pNInz6obpgDQGcFmaJgB", - "Sam": "yoZ06aMxZJJ28mfd3POQ", - } - self._headers = { - "Content-Type": "application/json", - "xi-api-key": cfg.elevenlabs_api_key, - } - self._voices = default_voices.copy() - if cfg.elevenlabs_voice_1_id in voice_options: - cfg.elevenlabs_voice_1_id = voice_options[cfg.elevenlabs_voice_1_id] - if cfg.elevenlabs_voice_2_id in voice_options: - cfg.elevenlabs_voice_2_id = voice_options[cfg.elevenlabs_voice_2_id] - self._use_custom_voice(cfg.elevenlabs_voice_1_id, 0) - self._use_custom_voice(cfg.elevenlabs_voice_2_id, 1) - - def _use_custom_voice(self, voice, voice_index) -> None: - """Use a custom voice if provided and not a placeholder - - Args: - voice (str): The voice ID - voice_index (int): The voice index - - Returns: - None: None - """ - # Placeholder values that should be treated as empty - if voice and voice not in PLACEHOLDERS: - self._voices[voice_index] = voice - - def _speech(self, text: str, voice_index: int = 0) -> bool: - """Speak text using elevenlabs.io's API - - Args: - text (str): The text to speak - voice_index (int, optional): The voice to use. Defaults to 0. 
- - Returns: - bool: True if the request was successful, False otherwise - """ - tts_url = ( - f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}" - ) - response = requests.post(tts_url, headers=self._headers, json={"text": text}) - - if response.status_code == 200: - with open("speech.mpeg", "wb") as f: - f.write(response.content) - playsound("speech.mpeg", True) - os.remove("speech.mpeg") - return True - else: - print("Request failed with status code:", response.status_code) - print("Response content:", response.content) - return False diff --git a/spaces/Detomo/ai-avatar-backend/helpers/tts.js b/spaces/Detomo/ai-avatar-backend/helpers/tts.js deleted file mode 100644 index 8a4f54f7f6e880da4deb7b874f90c92e77c99a75..0000000000000000000000000000000000000000 --- a/spaces/Detomo/ai-avatar-backend/helpers/tts.js +++ /dev/null @@ -1,92 +0,0 @@ -// azure-cognitiveservices-speech.js -require('dotenv').config() -const sdk = require('microsoft-cognitiveservices-speech-sdk'); -const blendShapeNames = require('./blendshapeNames'); -const _ = require('lodash'); -const voicesMap = { - 'en-US': 'en-US-AmberNeural', - 'ja-JP': 'ja-JP-MayuNeural', - 'vi-VN': 'vi-VN-NamMinhNeural', -}; - -let SSML = ` - - - __TEXT__ - -`; - -const key = process.env.AZURE_KEY; -const region = process.env.AZURE_REGION; - -/** - * Node.js server code to convert text to speech - * @returns stream - * @param {*} text text to convert to audio/speech - * @param language - */ -const textToSpeech = async (text, language)=> { - - // convert callback function to promise - return new Promise((resolve, reject) => { - - const voice = voicesMap[language]; - let ssml = SSML.replace("__TEXT__", text).replace("", ``); - - - const speechConfig = sdk.SpeechConfig.fromSubscription(key, region); - speechConfig.speechSynthesisOutputFormat = 5; // mp3 - - let audioConfig = null; - - // if (filename) { - let randomString = Math.random().toString(36).slice(2, 7); - let filename = `./public/speech-${randomString}.mp3`; - audioConfig = sdk.AudioConfig.fromAudioFileOutput(filename); - // } - - let blendData = []; - let timeStep = 1/60; - let timeStamp = 0; - - const synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig); - - // Subscribes to viseme received event - synthesizer.visemeReceived = function (s, e) { - - // `Animation` is an xml string for SVG or a json string for blend shapes - var animation = JSON.parse(e.animation); - - _.each(animation.BlendShapes, blendArray => { - - let blend = {}; - _.each(blendShapeNames, (shapeName, i) => { - blend[shapeName] = blendArray[i]; - }); - - blendData.push({ - time: timeStamp, - blendshapes: blend - }); - timeStamp += timeStep; - }); - - } - - - synthesizer.speakSsmlAsync( - ssml, - result => { - - synthesizer.close(); - resolve({blendData, filename: `/speech-${randomString}.mp3`}); - - }, - error => { - synthesizer.close(); - reject(error); - }); - }); -}; - -module.exports = textToSpeech; \ No newline at end of file diff --git a/spaces/Detomo/detect_greeting_app/app.py b/spaces/Detomo/detect_greeting_app/app.py deleted file mode 100644 index 5b7f4454da1f26ef157d3d2f66a04cd96ae07a6f..0000000000000000000000000000000000000000 --- a/spaces/Detomo/detect_greeting_app/app.py +++ /dev/null @@ -1,80 +0,0 @@ -import gradio as gr -import time -from faster_whisper import WhisperModel -from utils import ffmpeg_read, stt, greeting_list -from sentence_transformers import SentenceTransformer, util -import torch - -whisper_models = ["tiny", "base", "small", "medium", "large-v1", 
"large-v2"] -audio_model = WhisperModel("base", compute_type="int8", device="cpu") -text_model = SentenceTransformer('all-MiniLM-L6-v2') -corpus_embeddings = torch.load('corpus_embeddings.pt') -model_type = "whisper" -title= "Greeting detection demo app" - -def speech_to_text(upload_audio): - """ - Transcribe audio using whisper model. - """ - # Transcribe audio - if model_type == "whisper": - transcribe_options = dict(task="transcribe", language="ja", beam_size=5, best_of=5, vad_filter=True) - segments_raw, info = audio_model.transcribe(upload_audio, **transcribe_options) - segments = [segment.text for segment in segments_raw] - return ' '.join(segments) - else: - text = stt(upload_audio) - return text - -def voice_detect(audio, recongnize_text=""): - """ - Transcribe audio using whisper model. - """ - # time.sleep(2) - if len(recongnize_text) !=0: - count_state = int(recongnize_text[0]) - recongnize_text = recongnize_text[1:] - else: - count_state = 0 - - threshold = 0.8 - detect_greeting = 0 - text = speech_to_text(audio) - if "ご視聴ありがとうございました" in text: - text = "" - recongnize_text = recongnize_text + " " + text - query_embedding = text_model.encode(text, convert_to_tensor=True) - for greeting in greeting_list: - if greeting in text: - detect_greeting = 1 - break - if detect_greeting == 0: - hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=1)[0] - if hits[0]['score'] > threshold: - detect_greeting = 1 - - recongnize_state = str(count_state + detect_greeting) + recongnize_text - return recongnize_text, recongnize_state, count_state - -def clear(): - return None, None, None - -demo = gr.Blocks(title=title) - -with demo: - gr.Markdown(''' -
      -       挨拶カウンター
      - ''') - with gr.Row(): - with gr.Column(): - audio_source = gr.Audio(source="microphone", type="filepath", streaming=True) - state = gr.State(value="") - with gr.Column(): - greeting_count = gr.Number(label="挨拶回数") - with gr.Row(): - text_output = gr.Textbox(label="認識されたテキスト") - audio_source.stream(voice_detect, inputs=[audio_source, state], outputs=[text_output, state, greeting_count]) - -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/training/__init__.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/training/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Egrt/GCycleGAN/nets/resnest/splat.py b/spaces/Egrt/GCycleGAN/nets/resnest/splat.py deleted file mode 100644 index c3f21b19ac75534521b9a0eae957e8ee454f1cd4..0000000000000000000000000000000000000000 --- a/spaces/Egrt/GCycleGAN/nets/resnest/splat.py +++ /dev/null @@ -1,99 +0,0 @@ -"""Split-Attention""" - -import torch -from torch import nn -import torch.nn.functional as F -from torch.nn import Conv2d, Module, Linear, BatchNorm2d, ReLU -from torch.nn.modules.utils import _pair - -__all__ = ['SplAtConv2d'] - -class SplAtConv2d(Module): - """Split-Attention Conv2d - """ - def __init__(self, in_channels, channels, kernel_size, stride=(1, 1), padding=(0, 0), - dilation=(1, 1), groups=1, bias=True, - radix=2, reduction_factor=4, - rectify=False, rectify_avg=False, norm_layer=None, - dropblock_prob=0.0, **kwargs): - super(SplAtConv2d, self).__init__() - padding = _pair(padding) - self.rectify = rectify and (padding[0] > 0 or padding[1] > 0) - self.rectify_avg = rectify_avg - inter_channels = max(in_channels*radix//reduction_factor, 32) - self.radix = radix - self.cardinality = groups - self.channels = channels - self.dropblock_prob = dropblock_prob - if self.rectify: - from rfconv import RFConv2d - self.conv = RFConv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation, - groups=groups*radix, bias=bias, average_mode=rectify_avg, **kwargs) - else: - self.conv = Conv2d(in_channels, channels*radix, kernel_size, stride, padding, dilation, - groups=groups*radix, bias=bias, **kwargs) - self.use_bn = norm_layer is not None - if self.use_bn: - self.bn0 = norm_layer(channels*radix) - self.relu = ReLU(inplace=True) - self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality) - if self.use_bn: - self.bn1 = norm_layer(inter_channels) - self.fc2 = Conv2d(inter_channels, channels*radix, 1, groups=self.cardinality) - if dropblock_prob > 0.0: - self.dropblock = DropBlock2D(dropblock_prob, 3) - self.rsoftmax = rSoftMax(radix, groups) - - def forward(self, x): - x = self.conv(x) - if self.use_bn: - x = self.bn0(x) - if self.dropblock_prob > 0.0: - x = self.dropblock(x) - x = self.relu(x) - - batch, rchannel = x.shape[:2] - if self.radix > 1: - if torch.__version__ < '1.5': - splited = torch.split(x, int(rchannel//self.radix), dim=1) - else: - splited = torch.split(x, rchannel//self.radix, dim=1) - gap = sum(splited) - else: - gap = x - gap = F.adaptive_avg_pool2d(gap, 1) - gap = self.fc1(gap) - - if self.use_bn: - gap = self.bn1(gap) - gap = self.relu(gap) - - atten = self.fc2(gap) - atten = self.rsoftmax(atten).view(batch, -1, 1, 1) - - if self.radix > 1: - if torch.__version__ < '1.5': - attens = torch.split(atten, int(rchannel//self.radix), dim=1) - else: - attens = torch.split(atten, rchannel//self.radix, dim=1) - out = sum([att*split for (att, split) 
in zip(attens, splited)]) - else: - out = atten * x - return out.contiguous() - -class rSoftMax(nn.Module): - def __init__(self, radix, cardinality): - super().__init__() - self.radix = radix - self.cardinality = cardinality - - def forward(self, x): - batch = x.size(0) - if self.radix > 1: - x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) - x = F.softmax(x, dim=1) - x = x.reshape(batch, -1) - else: - x = torch.sigmoid(x) - return x - diff --git a/spaces/EuroSciPy2022/xgboost-income-prediction-with-explainability/app.py b/spaces/EuroSciPy2022/xgboost-income-prediction-with-explainability/app.py deleted file mode 100644 index a1904c3bd9b857cf77c507e98e431d4bc50f4251..0000000000000000000000000000000000000000 --- a/spaces/EuroSciPy2022/xgboost-income-prediction-with-explainability/app.py +++ /dev/null @@ -1,176 +0,0 @@ -import random - -import gradio as gr -import matplotlib -import matplotlib.pyplot as plt -import pandas as pd -import shap -import xgboost as xgb -from datasets import load_dataset - -matplotlib.use("Agg") - -dataset = load_dataset("scikit-learn/adult-census-income") - -X_train = dataset["train"].to_pandas() -_ = X_train.pop("fnlwgt") -_ = X_train.pop("race") - -y_train = X_train.pop("income") -y_train = (y_train == ">50K").astype(int) -categorical_columns = [ - "workclass", - "education", - "marital.status", - "occupation", - "relationship", - "sex", - "native.country", -] -X_train = X_train.astype({col: "category" for col in categorical_columns}) - - -data = xgb.DMatrix(X_train, label=y_train, enable_categorical=True) -model = xgb.train(params={"objective": "binary:logistic"}, dtrain=data) -explainer = shap.TreeExplainer(model) - - -def predict(*args): - df = pd.DataFrame([args], columns=X_train.columns) - df = df.astype({col: "category" for col in categorical_columns}) - pos_pred = model.predict(xgb.DMatrix(df, enable_categorical=True)) - return {">50K": float(pos_pred[0]), "<=50K": 1 - float(pos_pred[0])} - - -def interpret(*args): - df = pd.DataFrame([args], columns=X_train.columns) - df = df.astype({col: "category" for col in categorical_columns}) - shap_values = explainer.shap_values(xgb.DMatrix(df, enable_categorical=True)) - scores_desc = list(zip(shap_values[0], X_train.columns)) - scores_desc = sorted(scores_desc) - fig_m = plt.figure(tight_layout=True) - plt.barh([s[1] for s in scores_desc], [s[0] for s in scores_desc]) - plt.title("Feature Shap Values") - plt.ylabel("Shap Value") - plt.xlabel("Feature") - plt.tight_layout() - return fig_m - - -unique_class = sorted(X_train["workclass"].unique()) -unique_education = sorted(X_train["education"].unique()) -unique_marital_status = sorted(X_train["marital.status"].unique()) -unique_relationship = sorted(X_train["relationship"].unique()) -unique_occupation = sorted(X_train["occupation"].unique()) -unique_sex = sorted(X_train["sex"].unique()) -unique_country = sorted(X_train["native.country"].unique()) - -with gr.Blocks() as demo: - gr.Markdown(""" - ## Income Classification with XGBoost 💰 - - This example shows how to load data from the hugging face hub to train an XGBoost classifier and - demo the predictions with gradio. - - The source is [here](https://huggingface.co/spaces/gradio/xgboost-income-prediction-with-explainability). 
- """) - with gr.Row(): - with gr.Column(): - age = gr.Slider(label="Age", minimum=17, maximum=90, step=1, randomize=True) - work_class = gr.Dropdown( - label="Workclass", - choices=unique_class, - value=lambda: random.choice(unique_class), - ) - education = gr.Dropdown( - label="Education Level", - choices=unique_education, - value=lambda: random.choice(unique_education), - ) - years = gr.Slider( - label="Years of schooling", - minimum=1, - maximum=16, - step=1, - randomize=True, - ) - marital_status = gr.Dropdown( - label="Marital Status", - choices=unique_marital_status, - value=lambda: random.choice(unique_marital_status), - ) - occupation = gr.Dropdown( - label="Occupation", - choices=unique_occupation, - value=lambda: random.choice(unique_occupation), - ) - relationship = gr.Dropdown( - label="Relationship Status", - choices=unique_relationship, - value=lambda: random.choice(unique_relationship), - ) - sex = gr.Dropdown( - label="Sex", choices=unique_sex, value=lambda: random.choice(unique_sex) - ) - capital_gain = gr.Slider( - label="Capital Gain", - minimum=0, - maximum=100000, - step=500, - randomize=True, - ) - capital_loss = gr.Slider( - label="Capital Loss", minimum=0, maximum=10000, step=500, randomize=True - ) - hours_per_week = gr.Slider( - label="Hours Per Week Worked", minimum=1, maximum=99, step=1 - ) - country = gr.Dropdown( - label="Native Country", - choices=unique_country, - value=lambda: random.choice(unique_country), - ) - with gr.Column(): - label = gr.Label() - plot = gr.Plot() - with gr.Row(): - predict_btn = gr.Button(value="Predict") - interpret_btn = gr.Button(value="Interpret") - predict_btn.click( - predict, - inputs=[ - age, - work_class, - education, - years, - marital_status, - occupation, - relationship, - sex, - capital_gain, - capital_loss, - hours_per_week, - country, - ], - outputs=[label], - ) - interpret_btn.click( - interpret, - inputs=[ - age, - work_class, - education, - years, - marital_status, - occupation, - relationship, - sex, - capital_gain, - capital_loss, - hours_per_week, - country, - ], - outputs=[plot], - ) - -demo.launch() diff --git a/spaces/Flux9665/IMS-Toucan/README.md b/spaces/Flux9665/IMS-Toucan/README.md deleted file mode 100644 index 80144846a8e59a50d094b1c404342cc9c3c7e821..0000000000000000000000000000000000000000 --- a/spaces/Flux9665/IMS-Toucan/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Multilingual TTS -emoji: 🌍🦜 -colorFrom: green -colorTo: yellow -sdk: gradio -sdk_version: 2.7.5.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/README.md b/spaces/Freiburg-AI-Research/dermoscopic_image_generation/README.md deleted file mode 100644 index 00ac8ed128577ad7ef30cb839ac40bf7cf5c49ea..0000000000000000000000000000000000000000 --- a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/README.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: Glide Text2im -emoji: 📊 -colorFrom: purple -colorTo: gray -sdk: gradio -app_file: app.py -pinned: false -duplicated_from: valhalla/glide-text2im ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: 
_string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/clip/encoders.py b/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/clip/encoders.py deleted file mode 100644 index ee72773c2c891d2dda6d02933e88599b5330b052..0000000000000000000000000000000000000000 --- a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/clip/encoders.py +++ /dev/null @@ -1,497 +0,0 @@ -import math -from collections import OrderedDict -from typing import List, Optional, Tuple, cast - -import attr -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -from .attention import ( - AttentionInfo, - DenseAttentionMask, - DenseCausalAttentionMask, - make_full_layout, - to_attention_info, -) -from .utils import Affine, LayerNorm, zero_key_bias_grad - -# Constants used in the original CLIP implementation. -image_channel_means = [122.77093945, 116.74601272, 104.09373519] -image_channel_stds = [68.50053285, 66.63215831, 70.32316309] - - -@attr.s(eq=False, repr=False) -class TextEmbedding(nn.Module): - n_vocab: int = attr.ib() - n_context: int = attr.ib() - n_state: int = attr.ib() - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - w_voc = torch.empty((self.n_vocab, self.n_state), dtype=torch.float32, device=self.device) - w_pos = torch.empty((self.n_context, self.n_state), dtype=torch.float32, device=self.device) - - with torch.no_grad(): - w_voc.normal_(std=0.02) - w_pos.normal_(std=0.01) - - self.w_voc = nn.Parameter(w_voc) - self.w_pos = nn.Parameter(w_pos) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - if len(x.shape) != 2: - raise ValueError() - - return F.embedding(x, self.w_voc) + self.w_pos[None, :, :] - - -@attr.s(eq=False, repr=False) -class ImageEmbedding(nn.Module): - image_size: int = attr.ib() - patch_size: int = attr.ib() - n_state: int = attr.ib() - n_timestep: int = attr.ib(default=0) - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - if self.image_size % self.patch_size != 0: - raise ValueError() - - n_patch = self.image_size // self.patch_size - patch_proj = torch.empty( - (self.n_state, 3) + 2 * (self.patch_size,), dtype=torch.float32, device=self.device - ) - w_pos = torch.empty( - (1 + n_patch ** 2, self.n_state), dtype=torch.float32, device=self.device - ) - - with torch.no_grad(): - if self.n_timestep == 0: - pred_state = torch.empty((self.n_state,), dtype=torch.float32, device=self.device) - pred_state.normal_(std=1 / np.sqrt(self.n_state)) - self.pred_state = nn.Parameter(pred_state) - else: - w_t = torch.empty( - (self.n_timestep, self.n_state), dtype=torch.float32, device=self.device - ) - w_t.normal_(std=1 / np.sqrt(self.n_state)) - self.w_t = nn.Parameter(w_t) - - patch_proj.normal_(std=np.sqrt(2 / (self.n_state * self.patch_size ** 2))) - w_pos.normal_(std=1 / np.sqrt(self.n_state)) - - self.patch_proj = nn.Parameter(patch_proj) - self.w_pos = nn.Parameter(w_pos) - - self.channel_means = 
torch.tensor( - image_channel_means, dtype=torch.float32, device=self.device - )[None, :, None, None] - self.channel_stds = torch.tensor( - image_channel_stds, dtype=torch.float32, device=self.device - )[None, :, None, None] - self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device) - - def forward(self, x: torch.Tensor, t: Optional[torch.Tensor] = None) -> torch.Tensor: - if len(x.shape) != 4: - raise ValueError("input should be 4d") - if x.shape[1] != 3: - raise ValueError("input should have 3 channels") - if not (x.shape[2] == self.image_size and x.shape[3] == self.image_size): - raise ValueError(f"input is not {self.image_size} x {self.image_size}") - - if (self.n_timestep == 0 and t is not None) or (self.n_timestep != 0 and t is None): - raise ValueError() - if self.n_timestep != 0: - assert t is not None - if len(t.shape) != 1: - raise ValueError() - if t.shape[0] != x.shape[0]: - raise ValueError() - - x = (x - self.channel_means) / self.channel_stds - x = F.conv2d(x, self.patch_proj, stride=self.patch_size) - x = x.reshape(x.shape[0], self.n_state, (self.image_size // self.patch_size) ** 2).permute( - 0, 2, 1 - ) - - sot = ( - self.pred_state[None, None].expand(x.shape[0], -1, -1) - if self.n_timestep == 0 - else F.embedding(cast(torch.Tensor, t), self.w_t)[:, None] - ) - x = torch.cat((sot, x), dim=1) + self.w_pos[None] - return self.ln(x) - - -@attr.s(eq=False, repr=False) -class AttentionResblock(nn.Module): - n_state: int = attr.ib() - n_resblocks: int = attr.ib() - attn_fn: AttentionInfo = attr.ib() - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - self.n_head_state = self.n_state // self.attn_fn.n_heads - self.qk_scale = 1 / np.sqrt(self.n_head_state) - - self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device) - self.f_q = Affine( - self.n_state, - self.n_state, - std=1 / math.sqrt(self.n_state), - use_bias=True, - bias_filter_fn=zero_key_bias_grad, - device=self.device, - ) - self.f_k = Affine( - self.n_state, - self.n_state, - std=1 / math.sqrt(self.n_state), - use_bias=False, - bias_filter_fn=zero_key_bias_grad, - device=self.device, - ) - self.f_v = Affine( - self.n_state, - self.n_state, - std=1 / math.sqrt(self.n_state), - use_bias=True, - bias_filter_fn=zero_key_bias_grad, - device=self.device, - ) - self.f_c = Affine( - self.n_state, - self.n_state, - use_bias=True, - std=1 / np.sqrt(self.n_state * self.n_resblocks ** 2), - device=self.device, - ) # XXX - - def forward(self, m: torch.Tensor) -> torch.Tensor: - n_context = m.shape[1] - n_query_pad = self.attn_fn.ctx_blks_q * self.attn_fn.block_size - n_context - n_key_pad = self.attn_fn.ctx_blks_k * self.attn_fn.block_size - n_context - assert n_query_pad >= 0 - assert n_key_pad >= 0 - - r = m - r = self.ln(r) - q, k, v = self.f_q(r), self.f_k(r), self.f_v(r) - - if n_query_pad != 0: - q = F.pad(q, (0, 0, 0, n_query_pad)) - - if n_key_pad != 0: - k = F.pad(k, (0, 0, 0, n_key_pad)) - v = F.pad(v, (0, 0, 0, n_key_pad)) - - q = q.view([q.shape[0], -1, self.attn_fn.n_heads, self.n_head_state]).permute((0, 2, 1, 3)) - k = k.view([k.shape[0], -1, self.attn_fn.n_heads, self.n_head_state]).permute((0, 2, 1, 3)) - v = v.view([v.shape[0], -1, self.attn_fn.n_heads, self.n_head_state]).permute((0, 2, 1, 3)) - w = torch.einsum( - "bhcd,bhkd->bhck", q * math.sqrt(self.qk_scale), k * math.sqrt(self.qk_scale) - ) - - if hasattr(self.attn_fn, "pytorch_attn_bias"): - bias = self.attn_fn.pytorch_attn_bias - assert len(bias.shape) in {2, 3} - - 
if len(bias.shape) == 2: - w = torch.softmax(w + self.attn_fn.pytorch_attn_bias[None, None], dim=-1) - elif len(bias.shape) == 3: - w = torch.softmax(w + self.attn_fn.pytorch_attn_bias[None], dim=-1) - else: - w = torch.softmax(w, dim=-1) - - r = torch.einsum("bhck,bhkd->bhcd", w, v) - r = r.permute((0, 2, 1, 3)).reshape((r.shape[0], -1, self.n_state)) - - if n_query_pad != 0: - r = r[:, :-n_query_pad] - - assert r.shape[1] == n_context - - r = self.f_c(r) - return m + r - - -@attr.s(eq=False, repr=False) -class FullyConnectedResblock(nn.Module): - """ - Not imported from other files because we retain Alec's original inits. - """ - - n_state: int = attr.ib() - n_resblocks: int = attr.ib() - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device) - self.f_1 = Affine( - self.n_state, - 4 * self.n_state, - use_bias=True, - std=np.sqrt(2 / (4 * self.n_state)), - device=self.device, - ) - self.f_2 = Affine( - 4 * self.n_state, - self.n_state, - use_bias=True, - std=1 / np.sqrt(self.n_state * self.n_resblocks ** 2), - device=self.device, - ) # XXX - - def forward(self, m: torch.Tensor) -> torch.Tensor: - r = m - r = self.ln(r) - - r = self.f_2(F.gelu(self.f_1(r))) - return m + r - - -@attr.s(eq=False, repr=False) -class TransformerBlock(nn.Module): - n_state: int = attr.ib() - n_resblocks: int = attr.ib() - attn_fn: AttentionInfo = attr.ib() - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - self.f_attn = AttentionResblock( - self.n_state, - self.n_resblocks, - self.attn_fn, - self.device, - ) - self.f_mlp = FullyConnectedResblock(self.n_state, self.n_resblocks, self.device) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - return self.f_mlp(self.f_attn(x)) - - -@attr.s(eq=False, repr=False) -class TextFeatureExtractor(nn.Module): - n_state: int = attr.ib() - n_embd: int = attr.ib() - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device) - self.f = Affine(self.n_state, self.n_embd, use_bias=False, device=self.device) - - def forward( - self, text: torch.Tensor, text_len: torch.Tensor, return_probe_features: bool = False - ) -> torch.Tensor: - if len(text.shape) != 3: - raise ValueError("expected text to be 3d") - if len(text_len.shape) != 1: - raise ValueError("expected text length to be 1d") - if text.shape[0] != text_len.shape[0]: - raise ValueError("text and text_len have inconsistent batch dimensions") - - index = (text_len - 1)[:, None, None].expand(-1, 1, text.shape[2]) - x = torch.gather(text, dim=1, index=index) - assert list(x.shape) == [text.shape[0], 1, text.shape[2]] - - if return_probe_features: - return x[:, 0] - - x = self.ln(x) - return self.f(x[:, 0]) - - -@attr.s(eq=False, repr=False) -class ImageFeatureExtractor(nn.Module): - n_state: int = attr.ib() - n_embd: int = attr.ib() - device: torch.device = attr.ib(default=torch.device("cuda")) - - def __attrs_post_init__(self) -> None: - super().__init__() - - self.ln = LayerNorm(self.n_state, eps=1e-5, device=self.device) - self.f = Affine(self.n_state, self.n_embd, use_bias=False, device=self.device) - - def forward(self, x: torch.Tensor, return_probe_features: bool = False) -> torch.Tensor: - if return_probe_features: - return x[:, 0] - - x = self.ln(x[:, :1]) - return 
self.f(x[:, 0]) - - -@attr.s(eq=False, repr=False) -class TextEncoder(nn.Module): - n_bpe_vocab: int = attr.ib() - max_text_len: int = attr.ib() - n_embd: int = attr.ib() - n_head: int = attr.ib() - n_xf_blocks: int = attr.ib() - n_head_state: int = attr.ib(default=64) - device: torch.device = attr.ib(default=torch.device("cuda")) - block_size: int = attr.ib(init=False, default=32) - - def __attrs_post_init__(self) -> None: - super().__init__() - - self.n_state = self.n_head * self.n_head_state - n_rounded_context = self.block_size * int(math.ceil(self.max_text_len / self.block_size)) - n_pad = n_rounded_context - self.max_text_len - - args = ( - n_rounded_context, - n_rounded_context, - self.block_size, - self.n_head, - False, - n_pad, - n_pad, - ) - mask = DenseCausalAttentionMask(*args) - attn_fn = to_attention_info(mask) - - m = 1 - make_full_layout(mask).astype(np.float32) - m[m == 1] = -1e10 - attn_fn.pytorch_attn_bias = torch.from_numpy(m).to(self.device) - - blocks: List[Tuple[str, nn.Module]] = [ - ( - "input", - TextEmbedding( - self.n_bpe_vocab, self.max_text_len, self.n_state, device=self.device - ), - ) - ] - - for i in range(self.n_xf_blocks): - blocks.append( - ( - f"block_{i}", - TransformerBlock(self.n_state, 2 * self.n_xf_blocks, attn_fn, self.device), - ) - ) - - blocks.append( - ("output", TextFeatureExtractor(self.n_state, self.n_embd, device=self.device)) - ) - - self.blocks = nn.ModuleDict(OrderedDict(blocks)) - - def forward( - self, - text: torch.Tensor, - text_len: torch.Tensor, - return_probe_features: bool = False, - ) -> torch.Tensor: - - n_batch = text.shape[0] - h = self.blocks["input"](text) - - for i in range(self.n_xf_blocks): - h = self.blocks[f"block_{i}"](h) - - h = self.blocks["output"](h, text_len, return_probe_features=return_probe_features) - - assert list(h.shape) == [ - n_batch, - self.n_embd if not return_probe_features else self.n_state, - ] - return h - - -@attr.s(eq=False, repr=False) -class ImageEncoder(nn.Module): - image_size: int = attr.ib() - patch_size: int = attr.ib() - n_embd: int = attr.ib() - n_head: int = attr.ib() - n_xf_blocks: int = attr.ib() - n_head_state: int = attr.ib(default=64) - n_timestep: int = attr.ib(default=0) - device: torch.device = attr.ib(default=torch.device("cuda")) - block_size: int = attr.ib(init=False, default=32) - - def __attrs_post_init__(self) -> None: - super().__init__() - - self.n_state = self.n_head * self.n_head_state - self.n_context = 1 + (self.image_size // self.patch_size) ** 2 - n_rounded_context = self.block_size * int(math.ceil(self.n_context / self.block_size)) - n_pad = n_rounded_context - self.n_context - - args = ( - n_rounded_context, - n_rounded_context, - self.block_size, - self.n_head, - False, - n_pad, - n_pad, - ) - mask = DenseAttentionMask(*args) - attn_fn = to_attention_info(mask) - - m = 1 - make_full_layout(mask).astype(np.float32) - m[m == 1] = -1e10 - attn_fn.pytorch_attn_bias = torch.from_numpy(m).to(self.device) - - blocks: List[Tuple[str, nn.Module]] = [ - ( - "input", - ImageEmbedding( - self.image_size, - self.patch_size, - self.n_state, - n_timestep=self.n_timestep, - device=self.device, - ), - ) - ] - - for i in range(self.n_xf_blocks): - blocks.append( - ( - f"block_{i}", - TransformerBlock(self.n_state, 2 * self.n_xf_blocks, attn_fn, self.device), - ) - ) - - blocks.append(("output", ImageFeatureExtractor(self.n_state, self.n_embd, self.device))) - - self.blocks = nn.ModuleDict(OrderedDict(blocks)) - - def forward( - self, - image: torch.Tensor, - timesteps: 
Optional[torch.Tensor] = None, - return_probe_features: bool = False, - ) -> torch.Tensor: - n_batch = image.shape[0] - h = self.blocks["input"](image, t=timesteps) - - for i in range(self.n_xf_blocks): - h = self.blocks[f"block_{i}"](h) - - h = self.blocks["output"](h, return_probe_features=return_probe_features) - - assert list(h.shape) == [ - n_batch, - self.n_embd if not return_probe_features else self.n_state, - ] - - return h diff --git a/spaces/GIZ/SDSN-demo/README.md b/spaces/GIZ/SDSN-demo/README.md deleted file mode 100644 index 802ed4ec16f79564907c3f8150a0ab4f9cc1de35..0000000000000000000000000000000000000000 --- a/spaces/GIZ/SDSN-demo/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: SDSN Demo -emoji: 📈 -colorFrom: purple -colorTo: blue -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/GaenKoki/voicevox/voicevox_engine/utility/mutex_utility.py b/spaces/GaenKoki/voicevox/voicevox_engine/utility/mutex_utility.py deleted file mode 100644 index 09d8cb9680f71758018bffe82838a763ca46fe31..0000000000000000000000000000000000000000 --- a/spaces/GaenKoki/voicevox/voicevox_engine/utility/mutex_utility.py +++ /dev/null @@ -1,15 +0,0 @@ -import threading - - -def mutex_wrapper(lock: threading.Lock): - def wrap(f): - def func(*args, **kw): - lock.acquire() - try: - return f(*args, **kw) - finally: - lock.release() - - return func - - return wrap diff --git a/spaces/Gen-Sim/Gen-Sim/notebooks/dataset_test.py b/spaces/Gen-Sim/Gen-Sim/notebooks/dataset_test.py deleted file mode 100644 index 191963b2a50622700a123b6b945b88ade1ac1ab2..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/notebooks/dataset_test.py +++ /dev/null @@ -1,166 +0,0 @@ -import os -import sys -import numpy as np -import hydra - -from cliport.dataset import RavensDataset -from cliport.utils import utils -from cliport import tasks -from cliport.environments.environment import Environment - -import torch - - -import matplotlib -import matplotlib.pyplot as plt - - - -mode = 'train' -augment = True - -### Uncomment the task you want to generate ### -# task = 'align-rope' -# task = 'assembling-kits-seq-seen-colors' -# task = 'assembling-kits-seq-unseen-colors' -# task = 'assembling-kits-seq-full' -# task = 'packing-shapes' -# task = 'packing-boxes-pairs-seen-colors' -# task = 'packing-boxes-pairs-unseen-colors' -# task = 'packing-boxes-pairs-full' -# task = 'packing-seen-google-objects-seq' -# task = 'packing-unseen-google-objects-seq' -# task = 'packing-seen-google-objects-group' -# task = 'packing-unseen-google-objects-group' -# task = 'put-block-in-bowl-seen-colors' -# task = 'put-block-in-bowl-unseen-colors' -# task = 'put-block-in-bowl-full' -task = 'align-box-corner' -# task = 'stack-block-pyramid-seq-unseen-colors' -# task = 'stack-block-pyramid-seq-full' -# task = 'separating-piles-seen-colors' -# task = 'separating-piles-unseen-colors' -# task = 'separating-piles-full' -# task = 'towers-of-hanoi-seq-seen-colors' -# task = 'towers-of-hanoi-seq-unseen-colors' -# task = 'towers-of-hanoi-seq-full' - -### visualization settings -max_episodes = 1 -max_steps = 100 - - - -root_dir = os.environ['CLIPORT_ROOT'] -config_file = 'train.yaml' -cfg = utils.load_hydra_config(os.path.join(root_dir, f'cliport/cfg/{config_file}')) - -# Override defaults -cfg['task'] = task -cfg['mode'] = mode -cfg['train']['data_augmentation'] = True -data_dir = os.path.join(root_dir, 'data') - - 
- -task = tasks.names[cfg['task']]() -task.mode = mode - -ds = RavensDataset(os.path.join(data_dir, f'{cfg["task"]}-{cfg["mode"]}'), cfg, n_demos=10, augment=augment) - - - -color_sums = [] -depth_sums = [] - -total_images = 0 - -for i in range(0, min(max_episodes, ds.n_episodes)): - print(f'\n\nEpisode: {i + 1}/{ds.n_episodes}') - episode, seed = ds.load(i) - - total_images += len(episode)-1 - - total_reward = 0 - for step in range(min(max_steps, len(episode))): - print(f"\nStep: {step+1}/{len(episode)}") - obs, act, reward, info = episode[step] - - total_reward += reward - batch = ds[i] - - num_images = len(obs['color']) - fig, axs = plt.subplots(2, num_images+1, figsize=(15, 6)) - for n in range(num_images): - axs[1, n].imshow(obs['color'][n]) - axs[1, n].set_title(f'Raw RGB {n+1}') - - axs[0, n].imshow(obs['depth'][n]) - axs[0, n].set_title(f'Raw Depth {n+1}') - - color_sums.append(np.mean(obs['color'][0], axis=(0,1)) / 255.0) - depth_sums.append(np.mean(obs['depth'][0], axis=(0,1))) - - cam_config = None - if b'camera_info' in info: - cam_config = ds.get_cam_config(info[b'camera_info']) - - img_depth = ds.get_image(obs, cam_config=cam_config) - img_tensor = torch.from_numpy(img_depth) - img = np.uint8(img_tensor.detach().cpu().numpy()) - img = img.transpose(1,0,2) - - if step < len(episode)-1 and episode[step]: - batch = ds.process_sample(episode[step], augment=augment) - else: - batch = ds.process_goal(episode[step], perturb_params=None) - - img_sample = batch['img'] - img_sample = torch.from_numpy(img_sample) - color = np.uint8(img_sample.detach().cpu().numpy())[:,:,:3] - color = color.transpose(1,0,2) - depth = np.array(img_sample.detach().cpu().numpy())[:,:,3] - depth = depth.transpose(1,0) - - axs[0, num_images].imshow(depth) - axs[0, num_images].set_title('Depth') - - axs[1,num_images].imshow(color) - axs[1,num_images].set_title('RGB + Oracle Pick & Place') - - if act and step < len(episode)-1: - p0 = batch['p0'] - p1 = batch['p1'] - p0_theta = batch['p0_theta'] - p1_theta = batch['p1_theta'] + p0_theta - - pick = p0 - place = p1 - - line_len = 30 - pick0 = (pick[0] + line_len/2.0 * np.sin(p0_theta), pick[1] + line_len/2.0 * np.cos(p0_theta)) - pick1 = (pick[0] - line_len/2.0 * np.sin(p0_theta), pick[1] - line_len/2.0 * np.cos(p0_theta)) - axs[1,num_images].plot((pick1[0], pick0[0]), (pick1[1], pick0[1]), color='r', linewidth=2) - - place0 = (place[0] + line_len/2.0 * np.sin(p1_theta), place[1] + line_len/2.0 * np.cos(p1_theta)) - place1 = (place[0] - line_len/2.0 * np.sin(p1_theta), place[1] - line_len/2.0 * np.cos(p1_theta)) - axs[1,num_images].plot((place1[0], place0[0]), (place1[1], place0[1]), color='g', linewidth=2) - - c_pick = plt.Circle(pick, 3, color='r', fill=False) - c_place = plt.Circle(place, 3, color='g', fill=False) - - axs[1,num_images].add_patch(c_pick) - axs[1,num_images].add_patch(c_place) - - plt.show() - - print(f"Language Goal: {batch['lang_goal']}") - print(f"Step Reward: {reward}") - print(f"Total Reward: {total_reward}") - - print(f"Done, Total Reward: {total_reward}") - -print("\n\nDataset Statistics: ") -print(f"Color Mean: {np.mean(color_sums, axis=0)}, Std: {np.std(color_sums, axis=0)}") -print(f"Depth Mean: {np.mean(depth_sums, axis=0)}, Std: {np.std(depth_sums, axis=0)}") -print(f"Total Image-Action Pairs: {total_images}") \ No newline at end of file diff --git a/spaces/GitMylo/bark-voice-cloning/hubert/hubert_manager.py b/spaces/GitMylo/bark-voice-cloning/hubert/hubert_manager.py deleted file mode 100644 index 
857f2af29886fca6eb4df506853f446066af7c04..0000000000000000000000000000000000000000 --- a/spaces/GitMylo/bark-voice-cloning/hubert/hubert_manager.py +++ /dev/null @@ -1,33 +0,0 @@ -import os.path -import shutil -import urllib.request - -import huggingface_hub - - -class HuBERTManager: - @staticmethod - def make_sure_hubert_installed(download_url: str = 'https://dl.fbaipublicfiles.com/hubert/hubert_base_ls960.pt', file_name: str = 'hubert.pt'): - install_dir = os.path.join('data', 'models', 'hubert') - if not os.path.isdir(install_dir): - os.makedirs(install_dir, exist_ok=True) - install_file = os.path.join(install_dir, file_name) - if not os.path.isfile(install_file): - print('Downloading HuBERT base model') - urllib.request.urlretrieve(download_url, install_file) - print('Downloaded HuBERT') - return install_file - - - @staticmethod - def make_sure_tokenizer_installed(model: str = 'quantifier_hubert_base_ls960_14.pth', repo: str = 'GitMylo/bark-voice-cloning', local_file: str = 'tokenizer.pth'): - install_dir = os.path.join('data', 'models', 'hubert') - if not os.path.isdir(install_dir): - os.makedirs(install_dir, exist_ok=True) - install_file = os.path.join(install_dir, local_file) - if not os.path.isfile(install_file): - print('Downloading HuBERT custom tokenizer') - huggingface_hub.hf_hub_download(repo, model, local_dir=install_dir, local_dir_use_symlinks=False) - shutil.move(os.path.join(install_dir, model), install_file) - print('Downloaded tokenizer') - return install_file diff --git a/spaces/Godrose0728/Aisound02/text/__init__.py b/spaces/Godrose0728/Aisound02/text/__init__.py deleted file mode 100644 index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000 --- a/spaces/Godrose0728/Aisound02/text/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/Gradio-Blocks/anime-colorization/README.md b/spaces/Gradio-Blocks/anime-colorization/README.md deleted file mode 100644 index ca77164f07a41a87a016a5fb3d4a1afa7d1923e6..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/anime-colorization/README.md +++ /dev/null @@ -1,188 +0,0 @@ ---- -title: Anime Colorization -emoji: 😻 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.0.5 -app_file: app.py -pinned: false -license: mit ---- - -# Pixel Guide Diffusion For Anime Colorization - -![avatar](docs/imgs/sample.png) - -Use denoising diffusion probabilistic model to do the anime colorization task. - -v1 test result is in branch [v1_result](https://github.com/HighCWu/pixel-guide-diffusion-for-anime-colorization/tree/v1_result). 
- -The dataset is not clean enough and the sketch as the guide is generated using sketch2keras, so the generalization is not good. - -In the future, I may try to use only anime portraits as the target images, and look for some more diverse sketch models. - -# Introduction and Usage - -Pixel Guide Denoising Diffusion Probabilistic Models ( One Channel Guide Version ) - -This repo is modified from [improved-diffusion](https://github.com/openai/improved-diffusion). - -Use [danbooru-sketch-pair-128x](https://www.kaggle.com/wuhecong/danbooru-sketch-pair-128x) as the dataset. Maybe you should move folders in the dataset first to make guide-target pair dataset. - -Modify `train_danbooru*.sh`, `test_danbooru*.sh` to meet your needs. - -The model is divided into a 32px part and a super-divided part, which can be cascaded during testing to get the final result. But there is no cascade during training. - -QQ Group: 1044867291 - -Discord: https://discord.gg/YwWcAS47qb - -# Original README - -# improved-diffusion - -This is the codebase for [Improved Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2102.09672). - -# Usage - -This section of the README walks through how to train and sample from a model. - -## Installation - -Clone this repository and navigate to it in your terminal. Then run: - -``` -pip install -e . -``` - -This should install the ~~`improved_diffusion`~~ `pixel_guide_diffusion` python package that the scripts depend on. - -## Preparing Data - -The training code reads images from a directory of image files. In the [datasets](datasets) folder, we have provided instructions/scripts for preparing these directories for ImageNet, LSUN bedrooms, and CIFAR-10. - -For creating your own dataset, simply dump all of your images into a directory with ".jpg", ".jpeg", or ".png" extensions. If you wish to train a class-conditional model, name the files like "mylabel1_XXX.jpg", "mylabel2_YYY.jpg", etc., so that the data loader knows that "mylabel1" and "mylabel2" are the labels. Subdirectories will automatically be enumerated as well, so the images can be organized into a recursive structure (although the directory names will be ignored, and the underscore prefixes are used as names). - -The images will automatically be scaled and center-cropped by the data-loading pipeline. Simply pass `--data_dir path/to/images` to the training script, and it will take care of the rest. - -## Training - -To train your model, you should first decide some hyperparameters. We will split up our hyperparameters into three groups: model architecture, diffusion process, and training flags. Here are some reasonable defaults for a baseline: - -``` -MODEL_FLAGS="--image_size 64 --num_channels 128 --num_res_blocks 3" -DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule linear" -TRAIN_FLAGS="--lr 1e-4 --batch_size 128" -``` - -Here are some changes we experiment with, and how to set them in the flags: - - * **Learned sigmas:** add `--learn_sigma True` to `MODEL_FLAGS` - * **Cosine schedule:** change `--noise_schedule linear` to `--noise_schedule cosine` - * **Reweighted VLB:** add `--use_kl True` to `DIFFUSION_FLAGS` and add `--schedule_sampler loss-second-moment` to `TRAIN_FLAGS`. - * **Class-conditional:** add `--class_cond True` to `MODEL_FLAGS`. - -Once you have setup your hyper-parameters, you can run an experiment like so: - -``` -python scripts/image_train.py --data_dir path/to/images $MODEL_FLAGS $DIFFUSION_FLAGS $TRAIN_FLAGS -``` - -You may also want to train in a distributed manner. 
In this case, run the same command with `mpiexec`: - -``` -mpiexec -n $NUM_GPUS python scripts/image_train.py --data_dir path/to/images $MODEL_FLAGS $DIFFUSION_FLAGS $TRAIN_FLAGS -``` - -When training in a distributed manner, you must manually divide the `--batch_size` argument by the number of ranks. In lieu of distributed training, you may use `--microbatch 16` (or `--microbatch 1` in extreme memory-limited cases) to reduce memory usage. - -The logs and saved models will be written to a logging directory determined by the `OPENAI_LOGDIR` environment variable. If it is not set, then a temporary directory will be created in `/tmp`. - -## Sampling - -The above training script saves checkpoints to `.pt` files in the logging directory. These checkpoints will have names like `ema_0.9999_200000.pt` and `model200000.pt`. You will likely want to sample from the EMA models, since those produce much better samples. - -Once you have a path to your model, you can generate a large batch of samples like so: - -``` -python scripts/image_sample.py --model_path /path/to/model.pt $MODEL_FLAGS $DIFFUSION_FLAGS -``` - -Again, this will save results to a logging directory. Samples are saved as a large `npz` file, where `arr_0` in the file is a large batch of samples. - -Just like for training, you can run `image_sample.py` through MPI to use multiple GPUs and machines. - -You can change the number of sampling steps using the `--timestep_respacing` argument. For example, `--timestep_respacing 250` uses 250 steps to sample. Passing `--timestep_respacing ddim250` is similar, but uses the uniform stride from the [DDIM paper](https://arxiv.org/abs/2010.02502) rather than our stride. - -To sample using [DDIM](https://arxiv.org/abs/2010.02502), pass `--use_ddim True`. - -## Models and Hyperparameters - -This section includes model checkpoints and run flags for the main models in the paper. - -Note that the batch sizes are specified for single-GPU training, even though most of these runs will not naturally fit on a single GPU. To address this, either set `--microbatch` to a small value (e.g. 4) to train on one GPU, or run with MPI and divide `--batch_size` by the number of GPUs. 
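-
-For example (a hypothetical 8-GPU launch; adjust the rank count and the division to your own hardware), the class-conditional ImageNet-64 configuration below, published with a global batch size of 2048, would use a per-rank `--batch_size` of 2048 / 8 = 256:
-
-```bash
-# Hypothetical 8-GPU launch; model/diffusion flags are taken from the
-# class-conditional ImageNet-64 entry below, only --batch_size is divided by the rank count.
-MODEL_FLAGS="--image_size 64 --num_channels 192 --num_res_blocks 3 --learn_sigma True --class_cond True"
-DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine --rescale_learned_sigmas False --rescale_timesteps False"
-TRAIN_FLAGS="--lr 3e-4 --batch_size 256"   # 2048 global batch / 8 ranks
-mpiexec -n 8 python scripts/image_train.py --data_dir path/to/images $MODEL_FLAGS $DIFFUSION_FLAGS $TRAIN_FLAGS
-```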
- -Unconditional ImageNet-64 with our `L_hybrid` objective and cosine noise schedule [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/imagenet64_uncond_100M_1500K.pt)]: - -```bash -MODEL_FLAGS="--image_size 64 --num_channels 128 --num_res_blocks 3 --learn_sigma True" -DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine" -TRAIN_FLAGS="--lr 1e-4 --batch_size 128" -``` - -Unconditional CIFAR-10 with our `L_hybrid` objective and cosine noise schedule [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/cifar10_uncond_50M_500K.pt)]: - -```bash -MODEL_FLAGS="--image_size 32 --num_channels 128 --num_res_blocks 3 --learn_sigma True --dropout 0.3" -DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine" -TRAIN_FLAGS="--lr 1e-4 --batch_size 128" -``` - -Class-conditional ImageNet-64 model (270M parameters, trained for 250K iterations) [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/imagenet64_cond_270M_250K.pt)]: - -```bash -MODEL_FLAGS="--image_size 64 --num_channels 192 --num_res_blocks 3 --learn_sigma True --class_cond True" -DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine --rescale_learned_sigmas False --rescale_timesteps False" -TRAIN_FLAGS="--lr 3e-4 --batch_size 2048" -``` - -Upsampling 256x256 model (280M parameters, trained for 500K iterations) [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/upsample_cond_500K.pt)]: - -```bash -MODEL_FLAGS="--num_channels 192 --num_res_blocks 2 --learn_sigma True --class_cond True" -DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule linear --rescale_learned_sigmas False --rescale_timesteps False" -TRAIN_FLAGS="--lr 3e-4 --batch_size 256" -``` - -LSUN bedroom model (lr=1e-4) [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/lsun_uncond_100M_1200K_bs128.pt)]: - -```bash -MODEL_FLAGS="--image_size 256 --num_channels 128 --num_res_blocks 2 --num_heads 1 --learn_sigma True --use_scale_shift_norm False --attention_resolutions 16" -DIFFUSION_FLAGS="--diffusion_steps 1000 --noise_schedule linear --rescale_learned_sigmas False --rescale_timesteps False" -TRAIN_FLAGS="--lr 1e-4 --batch_size 128" -``` - -LSUN bedroom model (lr=2e-5) [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/lsun_uncond_100M_2400K_bs64.pt)]: - -```bash -MODEL_FLAGS="--image_size 256 --num_channels 128 --num_res_blocks 2 --num_heads 1 --learn_sigma True --use_scale_shift_norm False --attention_resolutions 16" -DIFFUSION_FLAGS="--diffusion_steps 1000 --noise_schedule linear --rescale_learned_sigmas False --rescale_timesteps False --use_scale_shift_norm False" -TRAIN_FLAGS="--lr 2e-5 --batch_size 128" -``` - -Unconditional ImageNet-64 with the `L_vlb` objective and cosine noise schedule [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/imagenet64_uncond_vlb_100M_1500K.pt)]: - -```bash -MODEL_FLAGS="--image_size 64 --num_channels 128 --num_res_blocks 3 --learn_sigma True" -DIFFUSION_FLAGS="--diffusion_steps 4000 --noise_schedule cosine" -TRAIN_FLAGS="--lr 1e-4 --batch_size 128 --schedule_sampler loss-second-moment" -``` - -Unconditional CIFAR-10 with the `L_vlb` objective and cosine noise schedule [[checkpoint](https://openaipublic.blob.core.windows.net/diffusion/march-2021/cifar10_uncond_vlb_50M_500K.pt)]: - -```bash -MODEL_FLAGS="--image_size 32 --num_channels 128 --num_res_blocks 3 --learn_sigma True --dropout 0.3" -DIFFUSION_FLAGS="--diffusion_steps 4000 
--noise_schedule cosine" -TRAIN_FLAGS="--lr 1e-4 --batch_size 128 --schedule_sampler loss-second-moment" -``` diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py deleted file mode 100644 index 169278e5738b0abd4ae5e99594e4adbaaefa2d96..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py' -# learning policy -lr_config = dict(step=[28, 34]) -runner = dict(type='EpochBasedRunner', max_epochs=36) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/samplers/pseudo_sampler.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/samplers/pseudo_sampler.py deleted file mode 100644 index 2bd81abcdc62debc14772659d7a171f20bf33364..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/bbox/samplers/pseudo_sampler.py +++ /dev/null @@ -1,41 +0,0 @@ -import torch - -from ..builder import BBOX_SAMPLERS -from .base_sampler import BaseSampler -from .sampling_result import SamplingResult - - -@BBOX_SAMPLERS.register_module() -class PseudoSampler(BaseSampler): - """A pseudo sampler that does not do sampling actually.""" - - def __init__(self, **kwargs): - pass - - def _sample_pos(self, **kwargs): - """Sample positive samples.""" - raise NotImplementedError - - def _sample_neg(self, **kwargs): - """Sample negative samples.""" - raise NotImplementedError - - def sample(self, assign_result, bboxes, gt_bboxes, **kwargs): - """Directly returns the positive and negative indices of samples. 
- - Args: - assign_result (:obj:`AssignResult`): Assigned results - bboxes (torch.Tensor): Bounding boxes - gt_bboxes (torch.Tensor): Ground truth boxes - - Returns: - :obj:`SamplingResult`: sampler results - """ - pos_inds = torch.nonzero( - assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() - neg_inds = torch.nonzero( - assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() - gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) - sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, - assign_result, gt_flags) - return sampling_result diff --git a/spaces/HESOAYM/ElviraMulti/modules/shared.py b/spaces/HESOAYM/ElviraMulti/modules/shared.py deleted file mode 100644 index a9e72580aa7ae48f907e923a09099513570a9ad8..0000000000000000000000000000000000000000 --- a/spaces/HESOAYM/ElviraMulti/modules/shared.py +++ /dev/null @@ -1,55 +0,0 @@ -from modules.presets import COMPLETION_URL, BALANCE_API_URL, USAGE_API_URL, API_HOST -import os -import queue - -class State: - interrupted = False - multi_api_key = False - completion_url = COMPLETION_URL - balance_api_url = BALANCE_API_URL - usage_api_url = USAGE_API_URL - - def interrupt(self): - self.interrupted = True - - def recover(self): - self.interrupted = False - - def set_api_host(self, api_host): - self.completion_url = f"https://{api_host}/v1/chat/completions" - self.balance_api_url = f"https://{api_host}/dashboard/billing/credit_grants" - self.usage_api_url = f"https://{api_host}/dashboard/billing/usage" - os.environ["OPENAI_API_BASE"] = f"https://{api_host}/v1" - - def reset_api_host(self): - self.completion_url = COMPLETION_URL - self.balance_api_url = BALANCE_API_URL - self.usage_api_url = USAGE_API_URL - os.environ["OPENAI_API_BASE"] = f"https://{API_HOST}/v1" - return API_HOST - - def reset_all(self): - self.interrupted = False - self.completion_url = COMPLETION_URL - - def set_api_key_queue(self, api_key_list): - self.multi_api_key = True - self.api_key_queue = queue.Queue() - for api_key in api_key_list: - self.api_key_queue.put(api_key) - - def switching_api_key(self, func): - if not hasattr(self, "api_key_queue"): - return func - - def wrapped(*args, **kwargs): - api_key = self.api_key_queue.get() - args[0].api_key = api_key - ret = func(*args, **kwargs) - self.api_key_queue.put(api_key) - return ret - - return wrapped - - -state = State() diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/roberta/wsc/wsc_task.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/roberta/wsc/wsc_task.py deleted file mode 100644 index 602ea737ed75a33fddf44dd859e999ecfce2730d..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/roberta/wsc/wsc_task.py +++ /dev/null @@ -1,401 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import json -import os -import tempfile - -import numpy as np -import torch -import torch.nn.functional as F -from fairseq import utils -from fairseq.data import ( - Dictionary, - IdDataset, - ListDataset, - NestedDictionaryDataset, - NumelDataset, - NumSamplesDataset, - PadDataset, - SortDataset, - data_utils, - encoders, -) -from fairseq.tasks import LegacyFairseqTask, register_task - -from . 
import wsc_utils - - -@register_task("wsc") -class WSCTask(LegacyFairseqTask): - """Task to finetune RoBERTa for Winograd Schemas.""" - - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - parser.add_argument( - "data", metavar="DIR", help="path to data directory; we load .jsonl" - ) - parser.add_argument( - "--init-token", - type=int, - default=None, - help="add token at the beginning of each batch item", - ) - - def __init__(self, args, vocab): - super().__init__(args) - self.vocab = vocab - self.mask = vocab.add_symbol("") - - self.bpe = encoders.build_bpe(args) - self.tokenizer = encoders.build_tokenizer(args) - - # hack to handle GPT-2 BPE, which includes leading spaces - if args.bpe == "gpt2": - self.leading_space = True - self.trailing_space = False - else: - self.leading_space = False - self.trailing_space = True - - @classmethod - def load_dictionary(cls, filename): - """Load the dictionary from the filename - - Args: - filename (str): the filename - """ - dictionary = Dictionary.load(filename) - dictionary.add_symbol("") - return dictionary - - @classmethod - def setup_task(cls, args, **kwargs): - assert args.criterion == "wsc", "Must set --criterion=wsc" - - # load data and label dictionaries - vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt")) - print("| dictionary: {} types".format(len(vocab))) - - return cls(args, vocab) - - def binarize(self, s: str, append_eos: bool = False): - if self.tokenizer is not None: - s = self.tokenizer.encode(s) - if self.bpe is not None: - s = self.bpe.encode(s) - tokens = self.vocab.encode_line( - s, - append_eos=append_eos, - add_if_not_exist=False, - ).long() - if self.args.init_token is not None: - tokens = torch.cat([tokens.new([self.args.init_token]), tokens]) - return tokens - - def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space): - toks = self.binarize( - prefix + leading_space + txt + trailing_space + suffix, - append_eos=True, - ) - mask = torch.zeros_like(toks, dtype=torch.bool) - mask_start = len(self.binarize(prefix)) - mask_size = len(self.binarize(leading_space + txt)) - mask[mask_start : mask_start + mask_size] = 1 - return toks, mask - - def load_dataset( - self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs - ): - """Load a given dataset split. 
- - Args: - split (str): name of the split (e.g., train, valid, test) - """ - if data_path is None: - data_path = os.path.join(self.args.data, split + ".jsonl") - if not os.path.exists(data_path): - raise FileNotFoundError("Cannot find data: {}".format(data_path)) - - query_tokens = [] - query_masks = [] - query_lengths = [] - candidate_tokens = [] - candidate_masks = [] - candidate_lengths = [] - labels = [] - - for sentence, pronoun_span, query, label in wsc_utils.jsonl_iterator(data_path): - prefix = sentence[: pronoun_span.start].text - suffix = sentence[pronoun_span.end :].text_with_ws - - # spaCy spans include trailing spaces, but we need to know about - # leading spaces for the GPT-2 BPE - leading_space = ( - " " if sentence[: pronoun_span.start].text_with_ws.endswith(" ") else "" - ) - trailing_space = " " if pronoun_span.text_with_ws.endswith(" ") else "" - - # get noun phrases, excluding pronouns and anything overlapping with the query - cand_spans = wsc_utils.filter_noun_chunks( - wsc_utils.extended_noun_chunks(sentence), - exclude_pronouns=True, - exclude_query=query, - exact_match=False, - ) - - if query is not None: - query_toks, query_mask = self.binarize_with_mask( - query, prefix, suffix, leading_space, trailing_space - ) - query_len = len(query_toks) - else: - query_toks, query_mask, query_len = None, None, 0 - - query_tokens.append(query_toks) - query_masks.append(query_mask) - query_lengths.append(query_len) - - cand_toks, cand_masks = [], [] - for cand_span in cand_spans: - toks, mask = self.binarize_with_mask( - cand_span.text, - prefix, - suffix, - leading_space, - trailing_space, - ) - cand_toks.append(toks) - cand_masks.append(mask) - - # collate candidates - cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad()) - cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0) - assert cand_toks.size() == cand_masks.size() - - candidate_tokens.append(cand_toks) - candidate_masks.append(cand_masks) - candidate_lengths.append(cand_toks.size(1)) - - labels.append(label) - - query_lengths = np.array(query_lengths) - query_tokens = ListDataset(query_tokens, query_lengths) - query_masks = ListDataset(query_masks, query_lengths) - - candidate_lengths = np.array(candidate_lengths) - candidate_tokens = ListDataset(candidate_tokens, candidate_lengths) - candidate_masks = ListDataset(candidate_masks, candidate_lengths) - - labels = ListDataset(labels, [1] * len(labels)) - - dataset = { - "id": IdDataset(), - "query_tokens": query_tokens, - "query_masks": query_masks, - "candidate_tokens": candidate_tokens, - "candidate_masks": candidate_masks, - "labels": labels, - "nsentences": NumSamplesDataset(), - "ntokens": NumelDataset(query_tokens, reduce=True), - } - - nested_dataset = NestedDictionaryDataset( - dataset, - sizes=[query_lengths], - ) - - with data_utils.numpy_seed(self.args.seed): - shuffle = np.random.permutation(len(query_tokens)) - dataset = SortDataset( - nested_dataset, - # shuffle - sort_order=[shuffle], - ) - - if return_only: - return dataset - - self.datasets[split] = dataset - return self.datasets[split] - - def build_dataset_for_inference(self, sample_json): - with tempfile.NamedTemporaryFile(buffering=0) as h: - h.write((json.dumps(sample_json) + "\n").encode("utf-8")) - dataset = self.load_dataset( - "disambiguate_pronoun", - data_path=h.name, - return_only=True, - ) - return dataset - - def disambiguate_pronoun(self, model, sentence, use_cuda=False): - sample_json = wsc_utils.convert_sentence_to_json(sentence) - dataset = 
self.build_dataset_for_inference(sample_json) - sample = dataset.collater([dataset[0]]) - if use_cuda: - sample = utils.move_to_cuda(sample) - - def get_masked_input(tokens, mask): - masked_tokens = tokens.clone() - masked_tokens[mask.bool()] = self.mask - return masked_tokens - - def get_lprobs(tokens, mask): - logits, _ = model(src_tokens=get_masked_input(tokens, mask)) - lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float) - scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1) - mask = mask.type_as(scores) - scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1) - return scores - - cand_lprobs = get_lprobs( - sample["candidate_tokens"][0], - sample["candidate_masks"][0], - ) - if sample["query_tokens"][0] is not None: - query_lprobs = get_lprobs( - sample["query_tokens"][0].unsqueeze(0), - sample["query_masks"][0].unsqueeze(0), - ) - return (query_lprobs >= cand_lprobs).all().item() == 1 - else: - best_idx = cand_lprobs.argmax().item() - full_cand = sample["candidate_tokens"][0][best_idx] - mask = sample["candidate_masks"][0][best_idx] - toks = full_cand[mask.bool()] - return self.bpe.decode(self.source_dictionary.string(toks)).strip() - - @property - def source_dictionary(self): - return self.vocab - - @property - def target_dictionary(self): - return self.vocab - - -@register_task("winogrande") -class WinograndeTask(WSCTask): - """ - Task for WinoGrande dataset. Efficient implementation for Winograd schema - tasks with exactly two candidates, one of which is correct. - """ - - @classmethod - def setup_task(cls, args, **kwargs): - assert args.criterion == "winogrande", "Must set --criterion=winogrande" - - # load data and label dictionaries - vocab = cls.load_dictionary(os.path.join(args.data, "dict.txt")) - print("| dictionary: {} types".format(len(vocab))) - - return cls(args, vocab) - - def load_dataset( - self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs - ): - """Load a given dataset split. 
- - Args: - split (str): name of the split (e.g., train, valid, test) - """ - if data_path is None: - data_path = os.path.join(self.args.data, split + ".jsonl") - if not os.path.exists(data_path): - raise FileNotFoundError("Cannot find data: {}".format(data_path)) - - query_tokens = [] - query_masks = [] - query_lengths = [] - candidate_tokens = [] - candidate_masks = [] - candidate_lengths = [] - - itr = wsc_utils.winogrande_jsonl_iterator(data_path, eval=(split == "test")) - - for sample in itr: - sentence, pronoun_span, query, cand_text = sample - prefix = sentence[: pronoun_span[0]].rstrip() - suffix = sentence[pronoun_span[1] :] - - leading_space = " " if sentence[: pronoun_span[0]].endswith(" ") else "" - trailing_space = "" - - if query is not None: - query_toks, query_mask = self.binarize_with_mask( - query, - prefix, - suffix, - leading_space, - trailing_space, - ) - query_len = len(query_toks) - else: - query_toks, query_mask, query_len = None, None, 0 - - query_tokens.append(query_toks) - query_masks.append(query_mask) - query_lengths.append(query_len) - - cand_toks, cand_mask = self.binarize_with_mask( - cand_text, - prefix, - suffix, - leading_space, - trailing_space, - ) - - candidate_tokens.append(cand_toks) - candidate_masks.append(cand_mask) - candidate_lengths.append(cand_toks.size(0)) - - query_lengths = np.array(query_lengths) - - def get_pad_dataset_fn(tokens, length, pad_idx): - return PadDataset( - ListDataset(tokens, length), - pad_idx=pad_idx, - left_pad=False, - ) - - query_tokens = get_pad_dataset_fn(query_tokens, query_lengths, self.vocab.pad()) - query_masks = get_pad_dataset_fn(query_masks, query_lengths, 0) - - candidate_lengths = np.array(candidate_lengths) - candidate_tokens = get_pad_dataset_fn( - candidate_tokens, candidate_lengths, self.vocab.pad() - ) - candidate_masks = get_pad_dataset_fn(candidate_masks, candidate_lengths, 0) - - dataset = { - "id": IdDataset(), - "query_tokens": query_tokens, - "query_masks": query_masks, - "candidate_tokens": candidate_tokens, - "candidate_masks": candidate_masks, - "nsentences": NumSamplesDataset(), - "ntokens": NumelDataset(query_tokens, reduce=True), - } - - nested_dataset = NestedDictionaryDataset( - dataset, - sizes=[query_lengths], - ) - - with data_utils.numpy_seed(self.args.seed): - shuffle = np.random.permutation(len(query_tokens)) - dataset = SortDataset( - nested_dataset, - # shuffle - sort_order=[shuffle], - ) - - if return_only: - return dataset - - self.datasets[split] = dataset - return self.datasets[split] diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/cpc_feature_reader.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/cpc_feature_reader.py deleted file mode 100644 index c613f52d3c3de43a048849a231a9a34e2a883486..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/textless_nlp/gslm/speech2unit/pretrained/cpc_feature_reader.py +++ /dev/null @@ -1,192 +0,0 @@ -import soundfile as sf -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class CpcFeatureReader: - """ - Wrapper class to run inference on CPC model. - Helps extract features for a given audio file. 
- """ - - def __init__( - self, - checkpoint_path, - layer, - use_encoder_layer=False, - norm_features=False, - sample_rate=16000, - max_chunk=64000, - ): - self.model = load_cpc_model(checkpoint_path, layer).eval().cuda() - self.sample_rate = sample_rate - self.max_chunk = max_chunk - self.norm_features = norm_features - self.use_encoder_layer = use_encoder_layer - - def read_audio(self, path, ref_len=None): - wav, sr = sf.read(path) - if wav.ndim == 2: - wav = wav.mean(-1) - assert wav.ndim == 1, wav.ndim - assert sr == self.sample_rate, sr - if ref_len is not None and abs(ref_len - len(wav)) > 160: - print(f"ref {ref_len} != read {len(wav)} ({path})") - return wav - - def get_feats(self, file_path, ref_len=None): - x = self.read_audio(file_path, ref_len) - # Inspired from CPC_audio feature_loader.py - with torch.no_grad(): - x = torch.from_numpy(x).float().cuda() - x = x.view(1, 1, -1) - size = x.size(2) - feat = [] - start = 0 - while start < size: - if start + self.max_chunk > size: - break - x_chunk = x[..., start : start + self.max_chunk] - feat_chunk = self.model.extract_features( - source=x_chunk, - get_encoded=self.use_encoder_layer, - norm_output=self.norm_features, - ) - feat.append(feat_chunk) - start += self.max_chunk - - if start < size: - x_chunk = x[:, -self.max_chunk :] - feat_chunk = self.model.extract_features( - source=x_chunk, - get_encoded=self.use_encoder_layer, - norm_output=self.norm_features, - ) - df = x_chunk.size(2) // feat_chunk.size(1) - delta = (size - start) // df - feat.append(feat_chunk[:, -delta:]) - return torch.cat(feat, 1).squeeze(0) - - -def load_cpc_model(checkpoint_path, layer=None): - state_dict = torch.load(checkpoint_path) - weights = state_dict["weights"] - config = state_dict["config"] - if layer is not None: - config["nLevelsGRU"] = layer - - encoder = CPCEncoder(config["hiddenEncoder"]) - ar_net = CPCAR( - config["hiddenEncoder"], config["hiddenGar"], False, config["nLevelsGRU"] - ) - - model = CPCModel(encoder, ar_net) - model.load_state_dict(weights, strict=False) - model.config = config - - return model - - -class ChannelNorm(nn.Module): - def __init__(self, num_features, epsilon=1e-05, affine=True): - super(ChannelNorm, self).__init__() - if affine: - self.weight = nn.parameter.Parameter(torch.Tensor(1, num_features, 1)) - self.bias = nn.parameter.Parameter(torch.Tensor(1, num_features, 1)) - else: - self.weight = None - self.bias = None - self.epsilon = epsilon - self.p = 0 - self.affine = affine - self.reset_parameters() - - def reset_parameters(self): - if self.affine: - torch.nn.init.ones_(self.weight) - torch.nn.init.zeros_(self.bias) - - def forward(self, x): - cum_mean = x.mean(dim=1, keepdim=True) - cum_var = x.var(dim=1, keepdim=True) - x = (x - cum_mean) * torch.rsqrt(cum_var + self.epsilon) - if self.weight is not None: - x = x * self.weight + self.bias - return x - - -class CPCEncoder(nn.Module): - def __init__(self, hidden_dim=512): - super(CPCEncoder, self).__init__() - self.conv0 = nn.Conv1d(1, hidden_dim, 10, stride=5, padding=3) - self.batchNorm0 = ChannelNorm(hidden_dim) - self.conv1 = nn.Conv1d(hidden_dim, hidden_dim, 8, stride=4, padding=2) - self.batchNorm1 = ChannelNorm(hidden_dim) - self.conv2 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1) - self.batchNorm2 = ChannelNorm(hidden_dim) - self.conv3 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1) - self.batchNorm3 = ChannelNorm(hidden_dim) - self.conv4 = nn.Conv1d(hidden_dim, hidden_dim, 4, stride=2, padding=1) - self.batchNorm4 = 
ChannelNorm(hidden_dim) - self.DOWNSAMPLING = 160 - - def get_output_dim(self): - return self.conv4.out_channels - - def forward(self, x): - x = F.relu(self.batchNorm0(self.conv0(x))) - x = F.relu(self.batchNorm1(self.conv1(x))) - x = F.relu(self.batchNorm2(self.conv2(x))) - x = F.relu(self.batchNorm3(self.conv3(x))) - x = F.relu(self.batchNorm4(self.conv4(x))) - return x - - -class CPCAR(nn.Module): - def __init__(self, dim_encoded, dim_output, keep_hidden, num_layers): - super(CPCAR, self).__init__() - self.baseNet = nn.LSTM( - dim_encoded, dim_output, num_layers=num_layers, batch_first=True - ) - self.hidden = None - self.keep_hidden = keep_hidden - - def get_output_dim(self): - return self.baseNet.hidden_size - - def forward(self, x): - try: - self.baseNet.flatten_parameters() - except RuntimeError: - pass - x, h = self.baseNet(x, self.hidden) - if self.keep_hidden: - if isinstance(h, tuple): - self.hidden = tuple(x.detach() for x in h) - else: - self.hidden = h.detach() - return x - - -class CPCModel(nn.Module): - def __init__(self, encoder, ar_net): - super(CPCModel, self).__init__() - self.gEncoder = encoder - self.gAR = ar_net - self.config = None - - def forward(self, x, label): - encoded = self.gEncoder(x).permute(0, 2, 1) - cpc_feature = self.gAR(encoded) - return cpc_feature, encoded, label - - def extract_features(self, source, get_encoded=False, norm_output=False): - cpc_feature, encoded, _ = self.forward(source, None) - if get_encoded: - cpc_feature = encoded - if norm_output: - mean = cpc_feature.mean(dim=1, keepdim=True) - var = cpc_feature.var(dim=1, keepdim=True) - cpc_feature = (cpc_feature - mean) / torch.sqrt(var + 1e-08) - return cpc_feature diff --git a/spaces/Harveenchadha/en_to_indic_translation/indic_nlp_library/indicnlp/tokenize/__init__.py b/spaces/Harveenchadha/en_to_indic_translation/indic_nlp_library/indicnlp/tokenize/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/subword_nmt.py b/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/subword_nmt.py deleted file mode 100644 index 29104f4d8029524a80d6fa649b69a8acec0b8abc..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/en_to_indic_translation/subword-nmt/subword_nmt/subword_nmt.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import io -import sys -import codecs -import argparse - -from .learn_bpe import learn_bpe -from .apply_bpe import BPE, read_vocabulary -from .get_vocab import get_vocab -from .learn_joint_bpe_and_vocab import learn_joint_bpe_and_vocab - -from .learn_bpe import create_parser as create_learn_bpe_parser -from .apply_bpe import create_parser as create_apply_bpe_parser -from .get_vocab import create_parser as create_get_vocab_parser -from .learn_joint_bpe_and_vocab import create_parser as create_learn_joint_bpe_and_vocab_parser - -# hack for python2/3 compatibility -argparse.open = io.open - -def main(): - parser = argparse.ArgumentParser( - formatter_class=argparse.RawTextHelpFormatter, - description="subword-nmt: unsupervised word segmentation for neural machine translation and text generation ") - subparsers = parser.add_subparsers(dest='command', - help="""command to run. Run one of the commands with '-h' for more info. - -learn-bpe: learn BPE merge operations on input text. -apply-bpe: apply given BPE operations to input text. 
-get-vocab: extract vocabulary and word frequencies from input text. -learn-joint-bpe-and-vocab: executes recommended workflow for joint BPE.""") - - learn_bpe_parser = create_learn_bpe_parser(subparsers) - apply_bpe_parser = create_apply_bpe_parser(subparsers) - get_vocab_parser = create_get_vocab_parser(subparsers) - learn_joint_bpe_and_vocab_parser = create_learn_joint_bpe_and_vocab_parser(subparsers) - - args = parser.parse_args() - - if args.command == 'learn-bpe': - # read/write files as UTF-8 - if args.input.name != '': - args.input = codecs.open(args.input.name, encoding='utf-8') - if args.output.name != '': - args.output = codecs.open(args.output.name, 'w', encoding='utf-8') - - learn_bpe(args.input, args.output, args.symbols, args.min_frequency, args.verbose, - is_dict=args.dict_input, total_symbols=args.total_symbols) - elif args.command == 'apply-bpe': - # read/write files as UTF-8 - args.codes = codecs.open(args.codes.name, encoding='utf-8') - if args.input.name != '': - args.input = codecs.open(args.input.name, encoding='utf-8') - if args.output.name != '': - args.output = codecs.open(args.output.name, 'w', encoding='utf-8') - if args.vocabulary: - args.vocabulary = codecs.open(args.vocabulary.name, encoding='utf-8') - - if args.vocabulary: - vocabulary = read_vocabulary(args.vocabulary, args.vocabulary_threshold) - else: - vocabulary = None - - if sys.version_info < (3, 0): - args.separator = args.separator.decode('UTF-8') - if args.glossaries: - args.glossaries = [g.decode('UTF-8') for g in args.glossaries] - - bpe = BPE(args.codes, args.merges, args.separator, vocabulary, args.glossaries) - - for line in args.input: - args.output.write(bpe.process_line(line, args.dropout)) - - elif args.command == 'get-vocab': - if args.input.name != '': - args.input = codecs.open(args.input.name, encoding='utf-8') - if args.output.name != '': - args.output = codecs.open(args.output.name, 'w', encoding='utf-8') - get_vocab(args.input, args.output) - elif args.command == 'learn-joint-bpe-and-vocab': - learn_joint_bpe_and_vocab(args) - if sys.version_info < (3, 0): - args.separator = args.separator.decode('UTF-8') - else: - raise Exception('Invalid command provided') - - -# python 2/3 compatibility -if sys.version_info < (3, 0): - sys.stderr = codecs.getwriter('UTF-8')(sys.stderr) - sys.stdout = codecs.getwriter('UTF-8')(sys.stdout) - sys.stdin = codecs.getreader('UTF-8')(sys.stdin) -else: - sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer) - sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer) - sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer) diff --git a/spaces/Harveenchadha/oiTrans/indic_nlp_library/indicnlp/script/indic_scripts.py b/spaces/Harveenchadha/oiTrans/indic_nlp_library/indicnlp/script/indic_scripts.py deleted file mode 100644 index 66c797cc583b6dadc1903194919a8faea509be0d..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/oiTrans/indic_nlp_library/indicnlp/script/indic_scripts.py +++ /dev/null @@ -1,301 +0,0 @@ -# -# Copyright (c) 2013-present, Anoop Kunchukuttan -# All rights reserved. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
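
For context, the subword_nmt.py entry point shown above only parses arguments and dispatches to learn_bpe() and BPE(); the sketch below is a minimal, hedged example of driving the same learn-bpe / apply-bpe workflow directly from Python. The corpus and codes file names are placeholders, and BPE() is constructed with the library defaults ('@@' separator, no vocabulary filter) instead of the full argument list the CLI passes.

```python
# Minimal sketch of the learn-bpe / apply-bpe workflow from Python,
# mirroring the calls made by the CLI wrapper above. File names are placeholders.
import codecs

from subword_nmt.learn_bpe import learn_bpe
from subword_nmt.apply_bpe import BPE

# 1) learn merge operations from a tokenized corpus
with codecs.open("corpus.tok.txt", encoding="utf-8") as infile, \
     codecs.open("bpe.codes", "w", encoding="utf-8") as outfile:
    # positional args as in the wrapper: input, output, num_symbols, min_frequency, verbose
    learn_bpe(infile, outfile, 1000, 2, False)

# 2) apply the learned codes line by line (dropout disabled)
with codecs.open("bpe.codes", encoding="utf-8") as codes:
    bpe = BPE(codes)  # assumed library defaults: '@@' separator, no vocabulary filter
    print(bpe.process_line("a simple example sentence", 0.0))
```
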
-# - -import pandas as pd -import numpy as np -import os - -from indicnlp import common -from indicnlp.common import IndicNlpException -from indicnlp import langinfo as li - -### -# Phonetic Information about script characters -### - -""" Phonetic data about all languages except Tamil """ -ALL_PHONETIC_DATA=None - -""" Phonetic data for Tamil """ -TAMIL_PHONETIC_DATA=None - -""" Phonetic vector for all languages except Tamil """ -ALL_PHONETIC_VECTORS=None - -""" Phonetic vector for Tamil """ -TAMIL_PHONETIC_VECTORS=None - -""" Length of phonetic vector """ -PHONETIC_VECTOR_LENGTH=38 - -""" Start offset for the phonetic feature vector in the phonetic data vector """ -PHONETIC_VECTOR_START_OFFSET=6 - -## PHONETIC PROPERTIES in order in which they occur in the vector -## This list must be in sync with the keys in the PV_PROP_RANGES dictionary -PV_PROP=['basic_type', - 'vowel_length', - 'vowel_strength', - 'vowel_status', - 'consonant_type', - 'articulation_place', - 'aspiration', - 'voicing', - 'nasalization', - 'vowel_horizontal', - 'vowel_vertical', - 'vowel_roundness', - ] - -### -# Bit vector ranges for various properties -### - -PV_PROP_RANGES={ - 'basic_type': [0,6], - 'vowel_length': [6,8], - 'vowel_strength': [8,11], - 'vowel_status': [11,13], - 'consonant_type': [13,18], - 'articulation_place': [18,23], - 'aspiration': [23,25], - 'voicing': [25,27], - 'nasalization': [27,29], - 'vowel_horizontal': [29,32], - 'vowel_vertical': [32,36], - 'vowel_roundness': [36,38], - } - - -#### -# Indexes into the Phonetic Vector -#### -PVIDX_BT_VOWEL=0 -PVIDX_BT_CONSONANT=1 -PVIDX_BT_NUKTA=2 -PVIDX_BT_HALANT=3 -PVIDX_BT_ANUSVAAR=4 -PVIDX_BT_MISC=5 -PVIDX_BT_S=PVIDX_BT_VOWEL -PVIDX_BT_E=PVIDX_BT_MISC+1 - -PVIDX_VSTAT_DEP=12 - -##### -# Unicode information about characters -##### - -SCRIPT_OFFSET_START=0 -SCRIPT_OFFSET_RANGE=0x80 - -def init(): - """ - To be called by library loader, do not call it in your program - """ - - global ALL_PHONETIC_DATA, ALL_PHONETIC_VECTORS, TAMIL_PHONETIC_DATA, TAMIL_PHONETIC_VECTORS, PHONETIC_VECTOR_LENGTH, PHONETIC_VECTOR_START_OFFSET - - ALL_PHONETIC_DATA=pd.read_csv(os.path.join(common.get_resources_path(),'script','all_script_phonetic_data.csv'),encoding='utf-8') - TAMIL_PHONETIC_DATA=pd.read_csv(os.path.join(common.get_resources_path(),'script','tamil_script_phonetic_data.csv'),encoding='utf-8') - - ALL_PHONETIC_VECTORS= ALL_PHONETIC_DATA.iloc[:,PHONETIC_VECTOR_START_OFFSET:].values - TAMIL_PHONETIC_VECTORS=TAMIL_PHONETIC_DATA.iloc[:,PHONETIC_VECTOR_START_OFFSET:].values - - PHONETIC_VECTOR_LENGTH=ALL_PHONETIC_VECTORS.shape[1] - -def is_supported_language(lang): - return lang in list(li.SCRIPT_RANGES.keys()) - -def get_offset(c,lang): - if not is_supported_language(lang): - raise IndicNlpException('Language {} not supported'.format(lang)) - return ord(c)-li.SCRIPT_RANGES[lang][0] - -def offset_to_char(off,lang): - """ - Applicable to Brahmi derived Indic scripts - """ - if not is_supported_language(lang): - raise IndicNlpException('Language {} not supported'.format(lang)) - return chr(off+li.SCRIPT_RANGES[lang][0]) - -def is_indiclang_char(c,lang): - """ - Applicable to Brahmi derived Indic scripts - Note that DANDA and DOUBLE_DANDA have the same Unicode codepoint for all Indic scripts - """ - if not is_supported_language(lang): - raise IndicNlpException('Language {} not supported'.format(lang)) - o=get_offset(c,lang) - return (o>=SCRIPT_OFFSET_START and o=li.COORDINATED_RANGE_START_INCLUSIVE and c_offset<=li.COORDINATED_RANGE_END_INCLUSIVE) - -def 
in_coordinated_range(c,lang): - if not is_supported_language(lang): - raise IndicNlpException('Language {} not supported'.format(lang)) - return in_coordinated_range_offset(get_offset(c,lang)) - -def get_phonetic_info(lang): - if not is_supported_language(lang): - raise IndicNlpException('Language {} not supported'.format(lang)) - phonetic_data= ALL_PHONETIC_DATA if lang!=li.LC_TA else TAMIL_PHONETIC_DATA - phonetic_vectors= ALL_PHONETIC_VECTORS if lang!=li.LC_TA else TAMIL_PHONETIC_VECTORS - - return (phonetic_data, phonetic_vectors) - -def invalid_vector(): - ## TODO: check if np datatype is correct? - return np.array([0]*PHONETIC_VECTOR_LENGTH) - -def get_phonetic_feature_vector(c,lang): - - offset=get_offset(c,lang) - - if not in_coordinated_range_offset(offset): - return invalid_vector() - - phonetic_data, phonetic_vectors= get_phonetic_info(lang) - - if phonetic_data.iloc[offset]['Valid Vector Representation']==0: - return invalid_vector() - - return phonetic_vectors[offset] - -def get_phonetic_feature_vector_offset(offset,lang): - - if not in_coordinated_range_offset(offset): - return invalid_vector() - - phonetic_data, phonetic_vectors= get_phonetic_info(lang) - - if phonetic_data.iloc[offset]['Valid Vector Representation']==0: - return invalid_vector() - - return phonetic_vectors[offset] - -### Unary operations on vectors -def is_valid(v): - return np.sum(v)>0 - -def is_vowel(v): - return v[PVIDX_BT_VOWEL]==1 - -def is_consonant(v): - return v[PVIDX_BT_CONSONANT]==1 - -def is_halant(v): - return v[PVIDX_BT_HALANT]==1 - -def is_nukta(v): - return v[PVIDX_BT_NUKTA]==1 - -def is_anusvaar(v): - return v[PVIDX_BT_ANUSVAAR]==1 - -def is_misc(v): - return v[PVIDX_BT_MISC]==1 - -def is_dependent_vowel(v): - return is_vowel(v) and v[PVIDX_VSTAT_DEP]==1 - -def is_plosive(v): - return is_consonant(v) and get_property_vector(v,'consonant_type')[0]==1 - -### Binary operations on phonetic vectors - -def or_vectors(v1,v2): - return np.array([ 1 if (b1+b2)>=1 else 0 for b1,b2 in zip(v1,v2) ]) - -def xor_vectors(v1,v2): - return np.array([ 1 if b1!=b2 else 0 for b1,b2 in zip(v1,v2) ]) - -### Getting properties from phonetic vectors - -def get_property_vector(v,prop_name): - return v[PV_PROP_RANGES[prop_name][0]:PV_PROP_RANGES[prop_name][1]] - -def get_property_value(v,prop_name): - factor_bits=get_property_vector(v,prop_name).tolist() - - v=0 - c=1 - for b in factor_bits[::-1]: - v+=(c*b) - c=c*2.0 - - return int(v) - -def lcsr_indic(srcw,tgtw,slang,tlang): - """ - compute the Longest Common Subsequence Ratio (LCSR) between two strings at the character level. 
- This works for Indic scripts by mapping both languages to a common script - - srcw: source language string - tgtw: source language string - slang: source language - tlang: target language - """ - score_mat=np.zeros((len(srcw)+1,len(tgtw)+1)) - - for si,sc in enumerate(srcw,1): - for ti,tc in enumerate(tgtw,1): - so=get_offset(sc,slang) - to=get_offset(tc,tlang) - - if in_coordinated_range_offset(so) and in_coordinated_range_offset(to) and so==to: - score_mat[si,ti]=score_mat[si-1,ti-1]+1.0 - elif not (in_coordinated_range_offset(so) or in_coordinated_range_offset(to)) and sc==tc: - score_mat[si,ti]=score_mat[si-1,ti-1]+1.0 - else: - score_mat[si,ti]= max( - score_mat[si,ti-1], - score_mat[si-1,ti]) - - return (score_mat[-1,-1]/float(max(len(srcw),len(tgtw))),float(len(srcw)),float(len(tgtw))) - -def lcsr_any(srcw,tgtw): - """ - LCSR computation if both languages have the same script - """ - score_mat=np.zeros((len(srcw)+1,len(tgtw)+1)) - - for si,sc in enumerate(srcw,1): - for ti,tc in enumerate(tgtw,1): - - if sc==tc: - score_mat[si,ti]=score_mat[si-1,ti-1]+1.0 - else: - score_mat[si,ti]= max( - score_mat[si,ti-1], - score_mat[si-1,ti]) - - return (score_mat[-1,-1]/float(max(len(srcw),len(tgtw))),float(len(srcw)),float(len(tgtw))) - -def lcsr(srcw,tgtw,slang,tlang): - """ - compute the Longest Common Subsequence Ratio (LCSR) between two strings at the character level. - - srcw: source language string - tgtw: source language string - slang: source language - tlang: target language - """ - - if slang==tlang or not is_supported_language(slang) or not is_supported_language(tlang): - return lcsr_any(srcw,tgtw,slang,tlang) - else: - return lcsr_indic(srcw,tgtw) - - - diff --git a/spaces/Hasani/Binary-Video-Classification-In-The-Wild/app.py b/spaces/Hasani/Binary-Video-Classification-In-The-Wild/app.py deleted file mode 100644 index b77d1aeb34020c4f299f5122798c934f2665db21..0000000000000000000000000000000000000000 --- a/spaces/Hasani/Binary-Video-Classification-In-The-Wild/app.py +++ /dev/null @@ -1,145 +0,0 @@ -import gradio as gr -import torch -import numpy as np -from transformers import AutoProcessor, AutoModel -from PIL import Image -import cv2 -from concurrent.futures import ThreadPoolExecutor -import os - - -MODEL_NAME = "microsoft/xclip-base-patch16-zero-shot" -CLIP_LEN = 32 - -# Check if GPU is available and set the device -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -print (device) - -# Load model and processor once and move them to the device -processor = AutoProcessor.from_pretrained(MODEL_NAME) -model = AutoModel.from_pretrained(MODEL_NAME).to(device) - -def get_video_length(file_path): - cap = cv2.VideoCapture(file_path) - length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - cap.release() - return length - -def read_video_opencv(file_path, indices): - frames = [] - with ThreadPoolExecutor() as executor: - futures = [executor.submit(get_frame, file_path, i) for i in indices] - for future in futures: - frame = future.result() - if frame is not None: - frames.append(frame) - return frames - -def get_frame(file_path, index): - cap = cv2.VideoCapture(file_path) - cap.set(cv2.CAP_PROP_POS_FRAMES, index) - ret, frame = cap.read() - cap.release() - if ret: - return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - return None - -def sample_uniform_frame_indices(clip_len, seg_len): - if seg_len < clip_len: - repeat_factor = np.ceil(clip_len / seg_len).astype(int) - indices = np.arange(seg_len).tolist() * repeat_factor - indices = indices[:clip_len] - else: - spacing = 
seg_len // clip_len - indices = [i * spacing for i in range(clip_len)] - return np.array(indices).astype(np.int64) - -def concatenate_frames(frames, clip_len): - layout = { 32: (4, 8) } - rows, cols = layout[clip_len] - combined_image = Image.new('RGB', (frames[0].shape[1]*cols, frames[0].shape[0]*rows)) - frame_iter = iter(frames) - y_offset = 0 - for i in range(rows): - x_offset = 0 - for j in range(cols): - img = Image.fromarray(next(frame_iter)) - combined_image.paste(img, (x_offset, y_offset)) - x_offset += frames[0].shape[1] - y_offset += frames[0].shape[0] - return combined_image - -def model_interface(uploaded_video, activity): - video_length = get_video_length(uploaded_video) - indices = sample_uniform_frame_indices(CLIP_LEN, seg_len=video_length) - video = read_video_opencv(uploaded_video, indices) - concatenated_image = concatenate_frames(video, CLIP_LEN) - - activities_list = [activity, "other"] - inputs = processor( - text=activities_list, - videos=list(video), - return_tensors="pt", - padding=True, - ) - - # Move the tensors to the same device as the model - for key, value in inputs.items(): - if isinstance(value, torch.Tensor): - inputs[key] = value.to(device) - - with torch.no_grad(): - outputs = model(**inputs) - - logits_per_video = outputs.logits_per_video - probs = logits_per_video.softmax(dim=1) - - results_probs = [] - results_logits = [] - max_prob_index = torch.argmax(probs[0]).item() - for i in range(len(activities_list)): - current_activity = activities_list[i] - prob = float(probs[0][i].cpu()) # Move tensor data to CPU for further processing - logit = float(logits_per_video[0][i].cpu()) # Move tensor data to CPU for further processing - results_probs.append((current_activity, f"Probability: {prob * 100:.2f}%")) - results_logits.append((current_activity, f"Raw Score: {logit:.2f}")) - - likely_label = activities_list[max_prob_index] - likely_probability = float(probs[0][max_prob_index].cpu()) * 100 # Move tensor data to CPU - - activity_perfomed = False - if likely_label != 'other': - activity_perfomed = True - - return activity_perfomed, concatenated_image, results_probs, results_logits, [likely_label, likely_probability] - - -# Load video paths from the folder -#video_folder = "Action Detection Samples" -#video_files = [os.path.join(video_folder, file) for file in os.listdir(video_folder) if file.endswith('.mp4')] # considering only mp4 files - -# Create examples: assuming every video is about 'dancing' -#examples = [[video, "taking a shot"] for video in video_files] - -iface = gr.Interface( - fn=model_interface, - inputs=[ - gr.components.Video(label="Upload a video file"), - gr.components.Text(default="taking a shot", label="Desired Activity to Recognize"), - ], - outputs=[ - gr.components.Text(type="text", label="True/False"), - gr.components.Image(type="pil", label="Sampled Frames"), - gr.components.Text(type="text", label="Probabilities"), - gr.components.Text(type="text", label="Raw Scores"), - gr.components.Text(type="text", label="Top Prediction"), - - ], - title="Action Detection Video", - description="[Author: Ibrahim Hasani] This Method uses X-CLIP [Version: ZERO SHOT / SAMPLED FRAMES = 32] to determine if an action is being performed in a video or not. (Binaray Classifier). It contrasts an Action against multiple negative labels that are supposedly far enough in the latent semantic space vs the target label. 
Do not use negative labels in the desired activity, rather the action to be performed.", - live=False, - theme=gr.themes.Monochrome(), - #examples=examples # Add examples to the interface -) - -iface.launch() \ No newline at end of file diff --git a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/cache_dir/HuggingFaceM4/OBELICS_opt_out_docs_removed_2023_07_12_train_texts/zipf/zipf_fig.html b/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/cache_dir/HuggingFaceM4/OBELICS_opt_out_docs_removed_2023_07_12_train_texts/zipf/zipf_fig.html deleted file mode 100644 index 9fad90d1274bd58111abb76e42e8fbee14c637f4..0000000000000000000000000000000000000000 --- a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/cache_dir/HuggingFaceM4/OBELICS_opt_out_docs_removed_2023_07_12_train_texts/zipf/zipf_fig.html +++ /dev/null @@ -1,14 +0,0 @@ - - - -
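
As a stripped-down illustration of the zero-shot scoring path used in the Gradio app above, the sketch below pushes 32 random RGB frames (standing in for a decoded, uniformly sampled clip) and two candidate labels through the same X-CLIP processor/model calls and prints the softmaxed per-label probabilities. Running it downloads the checkpoint; the random frames are only a placeholder for real video input.

```python
# Stripped-down sketch of the X-CLIP zero-shot scoring used in app.py above.
# Random frames stand in for the 32 uniformly sampled video frames.
import numpy as np
import torch
from transformers import AutoProcessor, AutoModel

MODEL_NAME = "microsoft/xclip-base-patch16-zero-shot"
processor = AutoProcessor.from_pretrained(MODEL_NAME)
model = AutoModel.from_pretrained(MODEL_NAME)

frames = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(32)]
labels = ["taking a shot", "other"]

inputs = processor(text=labels, videos=list(frames), return_tensors="pt", padding=True)
with torch.no_grad():
    outputs = model(**inputs)

probs = outputs.logits_per_video.softmax(dim=1)  # shape (1, num_labels)
for label, p in zip(labels, probs[0].tolist()):
    print(f"{label}: {p * 100:.2f}%")
```
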
      -
      - - \ No newline at end of file diff --git a/spaces/ICML2022/OFA/fairseq/examples/m2m_100/tokenizers/seg_ko.sh b/spaces/ICML2022/OFA/fairseq/examples/m2m_100/tokenizers/seg_ko.sh deleted file mode 100644 index c523d92634d9b61b97bbcdbfd17dfc33465bfc09..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/m2m_100/tokenizers/seg_ko.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -SCRIPT=`realpath $0` -MECAB=`dirname $SCRIPT`/thirdparty/mecab-0.996-ko-0.9.2 - -export PATH=$PATH:"$MECAB/bin":"$MECAB/lib" -export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:"$MECAB/lib" - -cat - | mecab -O wakati diff --git a/spaces/ICML2022/OFA/fairseq/examples/noisychannel/rerank_score_bw.py b/spaces/ICML2022/OFA/fairseq/examples/noisychannel/rerank_score_bw.py deleted file mode 100644 index b0bc913651bd76667e25c214acb70f2bca19e185..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/noisychannel/rerank_score_bw.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import os -from contextlib import redirect_stdout - -from fairseq import options -from fairseq_cli import generate - -from examples.noisychannel import rerank_options, rerank_utils - - -def score_bw(args): - if args.backwards1: - scorer1_src = args.target_lang - scorer1_tgt = args.source_lang - else: - scorer1_src = args.source_lang - scorer1_tgt = args.target_lang - - if args.score_model2 is not None: - if args.backwards2: - scorer2_src = args.target_lang - scorer2_tgt = args.source_lang - else: - scorer2_src = args.source_lang - scorer2_tgt = args.target_lang - - rerank1_is_gen = ( - args.gen_model == args.score_model1 and args.source_prefix_frac is None - ) - rerank2_is_gen = ( - args.gen_model == args.score_model2 and args.source_prefix_frac is None - ) - - ( - pre_gen, - left_to_right_preprocessed_dir, - right_to_left_preprocessed_dir, - backwards_preprocessed_dir, - lm_preprocessed_dir, - ) = rerank_utils.get_directories( - args.data_dir_name, - args.num_rescore, - args.gen_subset, - args.gen_model_name, - args.shard_id, - args.num_shards, - args.sampling, - args.prefix_len, - args.target_prefix_frac, - args.source_prefix_frac, - ) - - score1_file = rerank_utils.rescore_file_name( - pre_gen, - args.prefix_len, - args.model1_name, - target_prefix_frac=args.target_prefix_frac, - source_prefix_frac=args.source_prefix_frac, - backwards=args.backwards1, - ) - - if args.score_model2 is not None: - score2_file = rerank_utils.rescore_file_name( - pre_gen, - args.prefix_len, - args.model2_name, - target_prefix_frac=args.target_prefix_frac, - source_prefix_frac=args.source_prefix_frac, - backwards=args.backwards2, - ) - - if args.right_to_left1: - rerank_data1 = right_to_left_preprocessed_dir - elif args.backwards1: - rerank_data1 = backwards_preprocessed_dir - else: - rerank_data1 = left_to_right_preprocessed_dir - - gen_param = ["--batch-size", str(128), "--score-reference", "--gen-subset", "train"] - if not rerank1_is_gen and not os.path.isfile(score1_file): - print("STEP 4: score the translations for model 1") - - model_param1 = [ - "--path", - args.score_model1, - "--source-lang", - scorer1_src, - "--target-lang", - scorer1_tgt, - ] - 
gen_model1_param = [rerank_data1] + gen_param + model_param1 - - gen_parser = options.get_generation_parser() - input_args = options.parse_args_and_arch(gen_parser, gen_model1_param) - - with open(score1_file, "w") as f: - with redirect_stdout(f): - generate.main(input_args) - - if ( - args.score_model2 is not None - and not os.path.isfile(score2_file) - and not rerank2_is_gen - ): - print("STEP 4: score the translations for model 2") - - if args.right_to_left2: - rerank_data2 = right_to_left_preprocessed_dir - elif args.backwards2: - rerank_data2 = backwards_preprocessed_dir - else: - rerank_data2 = left_to_right_preprocessed_dir - - model_param2 = [ - "--path", - args.score_model2, - "--source-lang", - scorer2_src, - "--target-lang", - scorer2_tgt, - ] - gen_model2_param = [rerank_data2] + gen_param + model_param2 - - gen_parser = options.get_generation_parser() - input_args = options.parse_args_and_arch(gen_parser, gen_model2_param) - - with open(score2_file, "w") as f: - with redirect_stdout(f): - generate.main(input_args) - - -def cli_main(): - parser = rerank_options.get_reranking_parser() - args = options.parse_args_and_arch(parser) - score_bw(args) - - -if __name__ == "__main__": - cli_main() diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/data/language_pair_dataset.py b/spaces/ICML2022/OFA/fairseq/fairseq/data/language_pair_dataset.py deleted file mode 100644 index ff3e14bf14770638524ef6067b558e455dbe5f2b..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/data/language_pair_dataset.py +++ /dev/null @@ -1,471 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging - -import numpy as np -import torch -from fairseq.data import FairseqDataset, data_utils - - -logger = logging.getLogger(__name__) - - -def collate( - samples, - pad_idx, - eos_idx, - left_pad_source=True, - left_pad_target=False, - input_feeding=True, - pad_to_length=None, - pad_to_multiple=1, -): - if len(samples) == 0: - return {} - - def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None): - return data_utils.collate_tokens( - [s[key] for s in samples], - pad_idx, - eos_idx, - left_pad, - move_eos_to_beginning, - pad_to_length=pad_to_length, - pad_to_multiple=pad_to_multiple, - ) - - def check_alignment(alignment, src_len, tgt_len): - if alignment is None or len(alignment) == 0: - return False - if ( - alignment[:, 0].max().item() >= src_len - 1 - or alignment[:, 1].max().item() >= tgt_len - 1 - ): - logger.warning("alignment size mismatch found, skipping alignment!") - return False - return True - - def compute_alignment_weights(alignments): - """ - Given a tensor of shape [:, 2] containing the source-target indices - corresponding to the alignments, a weight vector containing the - inverse frequency of each target index is computed. - For e.g. 
if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then - a tensor containing [1., 0.5, 0.5, 1] should be returned (since target - index 3 is repeated twice) - """ - align_tgt = alignments[:, 1] - _, align_tgt_i, align_tgt_c = torch.unique( - align_tgt, return_inverse=True, return_counts=True - ) - align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]] - return 1.0 / align_weights.float() - - id = torch.LongTensor([s["id"] for s in samples]) - src_tokens = merge( - "source", - left_pad=left_pad_source, - pad_to_length=pad_to_length["source"] if pad_to_length is not None else None, - ) - # sort by descending source length - src_lengths = torch.LongTensor( - [s["source"].ne(pad_idx).long().sum() for s in samples] - ) - src_lengths, sort_order = src_lengths.sort(descending=True) - id = id.index_select(0, sort_order) - src_tokens = src_tokens.index_select(0, sort_order) - - prev_output_tokens = None - target = None - if samples[0].get("target", None) is not None: - target = merge( - "target", - left_pad=left_pad_target, - pad_to_length=pad_to_length["target"] - if pad_to_length is not None - else None, - ) - target = target.index_select(0, sort_order) - tgt_lengths = torch.LongTensor( - [s["target"].ne(pad_idx).long().sum() for s in samples] - ).index_select(0, sort_order) - ntokens = tgt_lengths.sum().item() - - if samples[0].get("prev_output_tokens", None) is not None: - prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target) - elif input_feeding: - # we create a shifted version of targets for feeding the - # previous output token(s) into the next decoder step - prev_output_tokens = merge( - "target", - left_pad=left_pad_target, - move_eos_to_beginning=True, - pad_to_length=pad_to_length["target"] - if pad_to_length is not None - else None, - ) - else: - ntokens = src_lengths.sum().item() - - batch = { - "id": id, - "nsentences": len(samples), - "ntokens": ntokens, - "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths,}, - "target": target, - } - if prev_output_tokens is not None: - batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select( - 0, sort_order - ) - - if samples[0].get("alignment", None) is not None: - bsz, tgt_sz = batch["target"].shape - src_sz = batch["net_input"]["src_tokens"].shape[1] - - offsets = torch.zeros((len(sort_order), 2), dtype=torch.long) - offsets[:, 1] += torch.arange(len(sort_order), dtype=torch.long) * tgt_sz - if left_pad_source: - offsets[:, 0] += src_sz - src_lengths - if left_pad_target: - offsets[:, 1] += tgt_sz - tgt_lengths - - alignments = [ - alignment + offset - for align_idx, offset, src_len, tgt_len in zip( - sort_order, offsets, src_lengths, tgt_lengths - ) - for alignment in [samples[align_idx]["alignment"].view(-1, 2)] - if check_alignment(alignment, src_len, tgt_len) - ] - - if len(alignments) > 0: - alignments = torch.cat(alignments, dim=0) - align_weights = compute_alignment_weights(alignments) - - batch["alignments"] = alignments - batch["align_weights"] = align_weights - - if samples[0].get("constraints", None) is not None: - # Collate the packed constraints across the samples, padding to - # the length of the longest sample. 
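
As a quick sanity check of the compute_alignment_weights() helper defined above, its docstring example can be reproduced standalone with the same tensor operations copied out of the function body: target index 3 is aligned twice, so those two alignment points each receive weight 0.5.

```python
# Reproduces the compute_alignment_weights() docstring example standalone.
import torch

alignments = torch.tensor([[5, 7], [2, 3], [1, 3], [4, 2]])

align_tgt = alignments[:, 1]
_, align_tgt_i, align_tgt_c = torch.unique(
    align_tgt, return_inverse=True, return_counts=True
)
align_weights = align_tgt_c[align_tgt_i[torch.arange(len(align_tgt))]]
print(1.0 / align_weights.float())  # tensor([1.0000, 0.5000, 0.5000, 1.0000])
```
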
- lens = [sample.get("constraints").size(0) for sample in samples] - max_len = max(lens) - constraints = torch.zeros((len(samples), max(lens))).long() - for i, sample in enumerate(samples): - constraints[i, 0 : lens[i]] = samples[i].get("constraints") - batch["constraints"] = constraints.index_select(0, sort_order) - - return batch - - -class LanguagePairDataset(FairseqDataset): - """ - A pair of torch.utils.data.Datasets. - - Args: - src (torch.utils.data.Dataset): source dataset to wrap - src_sizes (List[int]): source sentence lengths - src_dict (~fairseq.data.Dictionary): source vocabulary - tgt (torch.utils.data.Dataset, optional): target dataset to wrap - tgt_sizes (List[int], optional): target sentence lengths - tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary - left_pad_source (bool, optional): pad source tensors on the left side - (default: True). - left_pad_target (bool, optional): pad target tensors on the left side - (default: False). - shuffle (bool, optional): shuffle dataset elements before batching - (default: True). - input_feeding (bool, optional): create a shifted version of the targets - to be passed into the model for teacher forcing (default: True). - remove_eos_from_source (bool, optional): if set, removes eos from end - of source if it's present (default: False). - append_eos_to_target (bool, optional): if set, appends eos to end of - target if it's absent (default: False). - align_dataset (torch.utils.data.Dataset, optional): dataset - containing alignments. - constraints (Tensor, optional): 2d tensor with a concatenated, zero- - delimited list of constraints for each sentence. - append_bos (bool, optional): if set, appends bos to the beginning of - source/target sentence. - num_buckets (int, optional): if set to a value greater than 0, then - batches will be bucketed into the given number of batch shapes. - src_lang_id (int, optional): source language ID, if set, the collated batch - will contain a field 'src_lang_id' in 'net_input' which indicates the - source language of the samples. - tgt_lang_id (int, optional): target language ID, if set, the collated batch - will contain a field 'tgt_lang_id' which indicates the target language - of the samples. 
- """ - - def __init__( - self, - src, - src_sizes, - src_dict, - tgt=None, - tgt_sizes=None, - tgt_dict=None, - left_pad_source=True, - left_pad_target=False, - shuffle=True, - input_feeding=True, - remove_eos_from_source=False, - append_eos_to_target=False, - align_dataset=None, - constraints=None, - append_bos=False, - eos=None, - num_buckets=0, - src_lang_id=None, - tgt_lang_id=None, - pad_to_multiple=1, - ): - if tgt_dict is not None: - assert src_dict.pad() == tgt_dict.pad() - assert src_dict.eos() == tgt_dict.eos() - assert src_dict.unk() == tgt_dict.unk() - if tgt is not None: - assert len(src) == len( - tgt - ), "Source and target must contain the same number of examples" - self.src = src - self.tgt = tgt - self.src_sizes = np.array(src_sizes) - self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None - self.sizes = ( - np.vstack((self.src_sizes, self.tgt_sizes)).T - if self.tgt_sizes is not None - else self.src_sizes - ) - self.src_dict = src_dict - self.tgt_dict = tgt_dict - self.left_pad_source = left_pad_source - self.left_pad_target = left_pad_target - self.shuffle = shuffle - self.input_feeding = input_feeding - self.remove_eos_from_source = remove_eos_from_source - self.append_eos_to_target = append_eos_to_target - self.align_dataset = align_dataset - if self.align_dataset is not None: - assert ( - self.tgt_sizes is not None - ), "Both source and target needed when alignments are provided" - self.constraints = constraints - self.append_bos = append_bos - self.eos = eos if eos is not None else src_dict.eos() - self.src_lang_id = src_lang_id - self.tgt_lang_id = tgt_lang_id - if num_buckets > 0: - from fairseq.data import BucketPadLengthDataset - - self.src = BucketPadLengthDataset( - self.src, - sizes=self.src_sizes, - num_buckets=num_buckets, - pad_idx=self.src_dict.pad(), - left_pad=self.left_pad_source, - ) - self.src_sizes = self.src.sizes - logger.info("bucketing source lengths: {}".format(list(self.src.buckets))) - if self.tgt is not None: - self.tgt = BucketPadLengthDataset( - self.tgt, - sizes=self.tgt_sizes, - num_buckets=num_buckets, - pad_idx=self.tgt_dict.pad(), - left_pad=self.left_pad_target, - ) - self.tgt_sizes = self.tgt.sizes - logger.info( - "bucketing target lengths: {}".format(list(self.tgt.buckets)) - ) - - # determine bucket sizes using self.num_tokens, which will return - # the padded lengths (thanks to BucketPadLengthDataset) - num_tokens = np.vectorize(self.num_tokens, otypes=[np.compat.long]) - self.bucketed_num_tokens = num_tokens(np.arange(len(self.src))) - self.buckets = [ - (None, num_tokens) for num_tokens in np.unique(self.bucketed_num_tokens) - ] - else: - self.buckets = None - self.pad_to_multiple = pad_to_multiple - - def get_batch_shapes(self): - return self.buckets - - def __getitem__(self, index): - tgt_item = self.tgt[index] if self.tgt is not None else None - src_item = self.src[index] - # Append EOS to end of tgt sentence if it does not have an EOS and remove - # EOS from end of src sentence if it exists. 
This is useful when we use - # use existing datasets for opposite directions i.e., when we want to - # use tgt_dataset as src_dataset and vice versa - if self.append_eos_to_target: - eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos() - if self.tgt and self.tgt[index][-1] != eos: - tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])]) - - if self.append_bos: - bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos() - if self.tgt and self.tgt[index][0] != bos: - tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]]) - - bos = self.src_dict.bos() - if self.src[index][0] != bos: - src_item = torch.cat([torch.LongTensor([bos]), self.src[index]]) - - if self.remove_eos_from_source: - eos = self.src_dict.eos() - if self.src[index][-1] == eos: - src_item = self.src[index][:-1] - - example = { - "id": index, - "source": src_item, - "target": tgt_item, - } - if self.align_dataset is not None: - example["alignment"] = self.align_dataset[index] - if self.constraints is not None: - example["constraints"] = self.constraints[index] - return example - - def __len__(self): - return len(self.src) - - def collater(self, samples, pad_to_length=None): - """Merge a list of samples to form a mini-batch. - - Args: - samples (List[dict]): samples to collate - pad_to_length (dict, optional): a dictionary of - {'source': source_pad_to_length, 'target': target_pad_to_length} - to indicate the max length to pad to in source and target respectively. - - Returns: - dict: a mini-batch with the following keys: - - - `id` (LongTensor): example IDs in the original input order - - `ntokens` (int): total number of tokens in the batch - - `net_input` (dict): the input to the Model, containing keys: - - - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in - the source sentence of shape `(bsz, src_len)`. Padding will - appear on the left if *left_pad_source* is ``True``. - - `src_lengths` (LongTensor): 1D Tensor of the unpadded - lengths of each source sentence of shape `(bsz)` - - `prev_output_tokens` (LongTensor): a padded 2D Tensor of - tokens in the target sentence, shifted right by one - position for teacher forcing, of shape `(bsz, tgt_len)`. - This key will not be present if *input_feeding* is - ``False``. Padding will appear on the left if - *left_pad_target* is ``True``. - - `src_lang_id` (LongTensor): a long Tensor which contains source - language IDs of each sample in the batch - - - `target` (LongTensor): a padded 2D Tensor of tokens in the - target sentence of shape `(bsz, tgt_len)`. Padding will appear - on the left if *left_pad_target* is ``True``. - - `tgt_lang_id` (LongTensor): a long Tensor which contains target language - IDs of each sample in the batch - """ - res = collate( - samples, - pad_idx=self.src_dict.pad(), - eos_idx=self.eos, - left_pad_source=self.left_pad_source, - left_pad_target=self.left_pad_target, - input_feeding=self.input_feeding, - pad_to_length=pad_to_length, - pad_to_multiple=self.pad_to_multiple, - ) - if self.src_lang_id is not None or self.tgt_lang_id is not None: - src_tokens = res["net_input"]["src_tokens"] - bsz = src_tokens.size(0) - if self.src_lang_id is not None: - res["net_input"]["src_lang_id"] = ( - torch.LongTensor([[self.src_lang_id]]).expand(bsz, 1).to(src_tokens) - ) - if self.tgt_lang_id is not None: - res["tgt_lang_id"] = ( - torch.LongTensor([[self.tgt_lang_id]]).expand(bsz, 1).to(src_tokens) - ) - return res - - def num_tokens(self, index): - """Return the number of tokens in a sample. 
This value is used to - enforce ``--max-tokens`` during batching.""" - return max( - self.src_sizes[index], - self.tgt_sizes[index] if self.tgt_sizes is not None else 0, - ) - - def num_tokens_vec(self, indices): - """Return the number of tokens for a set of positions defined by indices. - This value is used to enforce ``--max-tokens`` during batching.""" - sizes = self.src_sizes[indices] - if self.tgt_sizes is not None: - sizes = np.maximum(sizes, self.tgt_sizes[indices]) - return sizes - - def size(self, index): - """Return an example's size as a float or tuple. This value is used when - filtering a dataset with ``--max-positions``.""" - return ( - self.src_sizes[index], - self.tgt_sizes[index] if self.tgt_sizes is not None else 0, - ) - - def ordered_indices(self): - """Return an ordered list of indices. Batches will be constructed based - on this order.""" - if self.shuffle: - indices = np.random.permutation(len(self)).astype(np.int64) - else: - indices = np.arange(len(self), dtype=np.int64) - if self.buckets is None: - # sort by target length, then source length - if self.tgt_sizes is not None: - indices = indices[np.argsort(self.tgt_sizes[indices], kind="mergesort")] - return indices[np.argsort(self.src_sizes[indices], kind="mergesort")] - else: - # sort by bucketed_num_tokens, which is: - # max(padded_src_len, padded_tgt_len) - return indices[ - np.argsort(self.bucketed_num_tokens[indices], kind="mergesort") - ] - - @property - def supports_prefetch(self): - return getattr(self.src, "supports_prefetch", False) and ( - getattr(self.tgt, "supports_prefetch", False) or self.tgt is None - ) - - def prefetch(self, indices): - self.src.prefetch(indices) - if self.tgt is not None: - self.tgt.prefetch(indices) - if self.align_dataset is not None: - self.align_dataset.prefetch(indices) - - def filter_indices_by_size(self, indices, max_sizes): - """Filter a list of sample indices. Remove those that are longer - than specified in max_sizes. 
- - Args: - indices (np.array): original array of sample indices - max_sizes (int or list[int] or tuple[int]): max sample size, - can be defined separately for src and tgt (then list or tuple) - - Returns: - np.array: filtered sample array - list: list of removed indices - """ - return data_utils.filter_paired_dataset_indices_by_size( - self.src_sizes, self.tgt_sizes, indices, max_sizes, - ) diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/bin/debug/analyze_overlapping_masks.sh b/spaces/InpaintAI/Inpaint-Anything/third_party/lama/bin/debug/analyze_overlapping_masks.sh deleted file mode 100644 index 4a4727b0129007d9b0eed3fc25780adb565965a2..0000000000000000000000000000000000000000 --- a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/bin/debug/analyze_overlapping_masks.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -BASEDIR="$(dirname $0)" - -# paths are valid for mml7 - -# select images -#ls /data/inpainting/work/data/train | shuf | head -2000 | xargs -n1 -I{} cp {} /data/inpainting/mask_analysis/src - -# generate masks -#"$BASEDIR/../gen_debug_mask_dataset.py" \ -# "$BASEDIR/../../configs/debug_mask_gen.yaml" \ -# "/data/inpainting/mask_analysis/src" \ -# "/data/inpainting/mask_analysis/generated" - -# predict -#"$BASEDIR/../predict.py" \ -# model.path="simple_pix2pix2_gap_sdpl_novgg_large_b18_ffc075_batch8x15/saved_checkpoint/r.suvorov_2021-04-30_14-41-12_train_simple_pix2pix2_gap_sdpl_novgg_large_b18_ffc075_batch8x15_epoch22-step-574999" \ -# indir="/data/inpainting/mask_analysis/generated" \ -# outdir="/data/inpainting/mask_analysis/predicted" \ -# dataset.img_suffix=.jpg \ -# +out_ext=.jpg - -# analyze good and bad samples -"$BASEDIR/../analyze_errors.py" \ - --only-report \ - --n-jobs 8 \ - "$BASEDIR/../../configs/analyze_mask_errors.yaml" \ - "/data/inpainting/mask_analysis/small/generated" \ - "/data/inpainting/mask_analysis/small/predicted" \ - "/data/inpainting/mask_analysis/small/report" diff --git a/spaces/JunchuanYu/SegRS/segment_anything/utils/amg.py b/spaces/JunchuanYu/SegRS/segment_anything/utils/amg.py deleted file mode 100644 index 3a137778e45c464c079658ecb87ec53270e789f7..0000000000000000000000000000000000000000 --- a/spaces/JunchuanYu/SegRS/segment_anything/utils/amg.py +++ /dev/null @@ -1,346 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import torch - -import math -from copy import deepcopy -from itertools import product -from typing import Any, Dict, Generator, ItemsView, List, Tuple - - -class MaskData: - """ - A structure for storing masks and their related data in batched format. - Implements basic filtering and concatenation. - """ - - def __init__(self, **kwargs) -> None: - for v in kwargs.values(): - assert isinstance( - v, (list, np.ndarray, torch.Tensor) - ), "MaskData only supports list, numpy arrays, and torch tensors." - self._stats = dict(**kwargs) - - def __setitem__(self, key: str, item: Any) -> None: - assert isinstance( - item, (list, np.ndarray, torch.Tensor) - ), "MaskData only supports list, numpy arrays, and torch tensors." 
- self._stats[key] = item - - def __delitem__(self, key: str) -> None: - del self._stats[key] - - def __getitem__(self, key: str) -> Any: - return self._stats[key] - - def items(self) -> ItemsView[str, Any]: - return self._stats.items() - - def filter(self, keep: torch.Tensor) -> None: - for k, v in self._stats.items(): - if v is None: - self._stats[k] = None - elif isinstance(v, torch.Tensor): - self._stats[k] = v[torch.as_tensor(keep, device=v.device)] - elif isinstance(v, np.ndarray): - self._stats[k] = v[keep.detach().cpu().numpy()] - elif isinstance(v, list) and keep.dtype == torch.bool: - self._stats[k] = [a for i, a in enumerate(v) if keep[i]] - elif isinstance(v, list): - self._stats[k] = [v[i] for i in keep] - else: - raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") - - def cat(self, new_stats: "MaskData") -> None: - for k, v in new_stats.items(): - if k not in self._stats or self._stats[k] is None: - self._stats[k] = deepcopy(v) - elif isinstance(v, torch.Tensor): - self._stats[k] = torch.cat([self._stats[k], v], dim=0) - elif isinstance(v, np.ndarray): - self._stats[k] = np.concatenate([self._stats[k], v], axis=0) - elif isinstance(v, list): - self._stats[k] = self._stats[k] + deepcopy(v) - else: - raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") - - def to_numpy(self) -> None: - for k, v in self._stats.items(): - if isinstance(v, torch.Tensor): - self._stats[k] = v.detach().cpu().numpy() - - -def is_box_near_crop_edge( - boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0 -) -> torch.Tensor: - """Filter masks at the edge of a crop, but not at the edge of the original image.""" - crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device) - orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device) - boxes = uncrop_boxes_xyxy(boxes, crop_box).float() - near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0) - near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0) - near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge) - return torch.any(near_crop_edge, dim=1) - - -def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor: - box_xywh = deepcopy(box_xyxy) - box_xywh[2] = box_xywh[2] - box_xywh[0] - box_xywh[3] = box_xywh[3] - box_xywh[1] - return box_xywh - - -def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]: - assert len(args) > 0 and all( - len(a) == len(args[0]) for a in args - ), "Batched iteration must have inputs of all the same size." - n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0) - for b in range(n_batches): - yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args] - - -def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]: - """ - Encodes masks to an uncompressed RLE, in the format expected by - pycoco tools. 
- """ - # Put in fortran order and flatten h,w - b, h, w = tensor.shape - tensor = tensor.permute(0, 2, 1).flatten(1) - - # Compute change indices - diff = tensor[:, 1:] ^ tensor[:, :-1] - change_indices = diff.nonzero() - - # Encode run length - out = [] - for i in range(b): - cur_idxs = change_indices[change_indices[:, 0] == i, 1] - cur_idxs = torch.cat( - [ - torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device), - cur_idxs + 1, - torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device), - ] - ) - btw_idxs = cur_idxs[1:] - cur_idxs[:-1] - counts = [] if tensor[i, 0] == 0 else [0] - counts.extend(btw_idxs.detach().cpu().tolist()) - out.append({"size": [h, w], "counts": counts}) - return out - - -def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray: - """Compute a binary mask from an uncompressed RLE.""" - h, w = rle["size"] - mask = np.empty(h * w, dtype=bool) - idx = 0 - parity = False - for count in rle["counts"]: - mask[idx : idx + count] = parity - idx += count - parity ^= True - mask = mask.reshape(w, h) - return mask.transpose() # Put in C order - - -def area_from_rle(rle: Dict[str, Any]) -> int: - return sum(rle["counts"][1::2]) - - -def calculate_stability_score( - masks: torch.Tensor, mask_threshold: float, threshold_offset: float -) -> torch.Tensor: - """ - Computes the stability score for a batch of masks. The stability - score is the IoU between the binary masks obtained by thresholding - the predicted mask logits at high and low values. - """ - # One mask is always contained inside the other. - # Save memory by preventing unnecesary cast to torch.int64 - intersections = ( - (masks > (mask_threshold + threshold_offset)) - .sum(-1, dtype=torch.int16) - .sum(-1, dtype=torch.int32) - ) - unions = ( - (masks > (mask_threshold - threshold_offset)) - .sum(-1, dtype=torch.int16) - .sum(-1, dtype=torch.int32) - ) - return intersections / unions - - -def build_point_grid(n_per_side: int) -> np.ndarray: - """Generates a 2D grid of points evenly spaced in [0,1]x[0,1].""" - offset = 1 / (2 * n_per_side) - points_one_side = np.linspace(offset, 1 - offset, n_per_side) - points_x = np.tile(points_one_side[None, :], (n_per_side, 1)) - points_y = np.tile(points_one_side[:, None], (1, n_per_side)) - points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2) - return points - - -def build_all_layer_point_grids( - n_per_side: int, n_layers: int, scale_per_layer: int -) -> List[np.ndarray]: - """Generates point grids for all crop layers.""" - points_by_layer = [] - for i in range(n_layers + 1): - n_points = int(n_per_side / (scale_per_layer**i)) - points_by_layer.append(build_point_grid(n_points)) - return points_by_layer - - -def generate_crop_boxes( - im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float -) -> Tuple[List[List[int]], List[int]]: - """ - Generates a list of crop boxes of different sizes. Each layer - has (2**i)**2 boxes for the ith layer. 
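
To make the RLE helpers above concrete, the round-trip sketch below encodes a small boolean mask with mask_to_rle_pytorch(), recovers it with rle_to_mask(), and checks the foreground area with area_from_rle(). The import path assumes the segment_anything package is installed; the tiny 4x6 mask is an arbitrary example.

```python
# Round-trip check for the RLE helpers above (uncompressed, column-major runs).
import numpy as np
import torch

from segment_anything.utils.amg import (
    area_from_rle,
    mask_to_rle_pytorch,
    rle_to_mask,
)

mask = torch.zeros(1, 4, 6, dtype=torch.bool)
mask[0, 1:3, 2:5] = True                 # a 2x3 foreground patch

rle = mask_to_rle_pytorch(mask)[0]       # {"size": [4, 6], "counts": [...]}
recovered = rle_to_mask(rle)             # np.ndarray, shape (4, 6), dtype bool

assert np.array_equal(recovered, mask[0].numpy())
print(area_from_rle(rle))                # 6 foreground pixels
```
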
- """ - crop_boxes, layer_idxs = [], [] - im_h, im_w = im_size - short_side = min(im_h, im_w) - - # Original image - crop_boxes.append([0, 0, im_w, im_h]) - layer_idxs.append(0) - - def crop_len(orig_len, n_crops, overlap): - return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops)) - - for i_layer in range(n_layers): - n_crops_per_side = 2 ** (i_layer + 1) - overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side)) - - crop_w = crop_len(im_w, n_crops_per_side, overlap) - crop_h = crop_len(im_h, n_crops_per_side, overlap) - - crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)] - crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)] - - # Crops in XYWH format - for x0, y0 in product(crop_box_x0, crop_box_y0): - box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)] - crop_boxes.append(box) - layer_idxs.append(i_layer + 1) - - return crop_boxes, layer_idxs - - -def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor: - x0, y0, _, _ = crop_box - offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device) - # Check if boxes has a channel dimension - if len(boxes.shape) == 3: - offset = offset.unsqueeze(1) - return boxes + offset - - -def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor: - x0, y0, _, _ = crop_box - offset = torch.tensor([[x0, y0]], device=points.device) - # Check if points has a channel dimension - if len(points.shape) == 3: - offset = offset.unsqueeze(1) - return points + offset - - -def uncrop_masks( - masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int -) -> torch.Tensor: - x0, y0, x1, y1 = crop_box - if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h: - return masks - # Coordinate transform masks - pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0) - pad = (x0, pad_x - x0, y0, pad_y - y0) - return torch.nn.functional.pad(masks, pad, value=0) - - -def remove_small_regions( - mask: np.ndarray, area_thresh: float, mode: str -) -> Tuple[np.ndarray, bool]: - """ - Removes small disconnected regions and holes in a mask. Returns the - mask and an indicator of if the mask has been modified. - """ - import cv2 # type: ignore - - assert mode in ["holes", "islands"] - correct_holes = mode == "holes" - working_mask = (correct_holes ^ mask).astype(np.uint8) - n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) - sizes = stats[:, -1][1:] # Row 0 is background label - small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] - if len(small_regions) == 0: - return mask, False - fill_labels = [0] + small_regions - if not correct_holes: - fill_labels = [i for i in range(n_labels) if i not in fill_labels] - # If every region is below threshold, keep largest - if len(fill_labels) == 0: - fill_labels = [int(np.argmax(sizes)) + 1] - mask = np.isin(regions, fill_labels) - return mask, True - - -def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]: - from pycocotools import mask as mask_utils # type: ignore - - h, w = uncompressed_rle["size"] - rle = mask_utils.frPyObjects(uncompressed_rle, h, w) - rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json - return rle - - -def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor: - """ - Calculates boxes in XYXY format around masks. Return [0,0,0,0] for - an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4. 
- """ - # torch.max below raises an error on empty inputs, just skip in this case - if torch.numel(masks) == 0: - return torch.zeros(*masks.shape[:-2], 4, device=masks.device) - - # Normalize shape to CxHxW - shape = masks.shape - h, w = shape[-2:] - if len(shape) > 2: - masks = masks.flatten(0, -3) - else: - masks = masks.unsqueeze(0) - - # Get top and bottom edges - in_height, _ = torch.max(masks, dim=-1) - in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :] - bottom_edges, _ = torch.max(in_height_coords, dim=-1) - in_height_coords = in_height_coords + h * (~in_height) - top_edges, _ = torch.min(in_height_coords, dim=-1) - - # Get left and right edges - in_width, _ = torch.max(masks, dim=-2) - in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :] - right_edges, _ = torch.max(in_width_coords, dim=-1) - in_width_coords = in_width_coords + w * (~in_width) - left_edges, _ = torch.min(in_width_coords, dim=-1) - - # If the mask is empty the right edge will be to the left of the left edge. - # Replace these boxes with [0, 0, 0, 0] - empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges) - out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1) - out = out * (~empty_filter).unsqueeze(-1) - - # Return to original shape - if len(shape) > 2: - out = out.reshape(*shape[:-2], 4) - else: - out = out[0] - - return out diff --git a/spaces/Justin-Choo/Lemon_WEB_UI/app.py b/spaces/Justin-Choo/Lemon_WEB_UI/app.py deleted file mode 100644 index ca8bed6e2dffb239c72a70a035ed537ebffe6446..0000000000000000000000000000000000000000 --- a/spaces/Justin-Choo/Lemon_WEB_UI/app.py +++ /dev/null @@ -1,149 +0,0 @@ -import os -from sys import executable as pyexecutable -import subprocess -import pathlib -import gc - -def Gitclone(URI:str,ClonePath:str = "") -> int : - if(ClonePath == "") : - while True: - i=subprocess.run([r"git",r"clone",URI]) - if(i.returncode == 0 ): - del i - gc.collect() - return 0 - else : - del i - else: - while True: - i=subprocess.run([r"git",r"clone",URI,ClonePath]) - if(i.returncode == 0 ): - del i - gc.collect() - return 0 - else : - del i -def DownLoad(URI:str,DownloadPath:str,DownLoadFileName:str ) -> int: - while (True): - i=subprocess.run([r"aria2c",r"-c",r"-x" ,r"16", r"-s",r"16", r"-k" ,r"1M" ,r"-m",r"0",r"--enable-mmap=false",r"--console-log-level=error",r"-d",DownloadPath,r"-o",DownLoadFileName,URI]); - if(i.returncode == 0 ): - del i - gc.collect() - return 0 - else : - del i -user_home =pathlib.Path.home().resolve() -os.chdir(str(user_home)) -#clone stable-diffusion-webui repo -print("cloning stable-diffusion-webui repo") -Gitclone(r"https://github.com/AUTOMATIC1111/stable-diffusion-webui.git",str(user_home / r"stable-diffusion-webui")) -os.chdir(str(user_home / r"stable-diffusion-webui")) -os.system("git reset --hard 89f9faa63388756314e8a1d96cf86bf5e0663045") -# - -#install extensions -print("installing extensions") -Gitclone(r"https://huggingface.co/embed/negative",str(user_home / r"stable-diffusion-webui" / r"embeddings" / r"negative")) -Gitclone(r"https://huggingface.co/embed/lora",str(user_home / r"stable-diffusion-webui" / r"models" / r"Lora" / r"positive")) -DownLoad(r"https://huggingface.co/embed/upscale/resolve/main/4x-UltraSharp.pth",str(user_home / r"stable-diffusion-webui" / r"models" / r"ESRGAN") ,r"4x-UltraSharp.pth") -while True: - if(subprocess.run([r"wget",r"https://raw.githubusercontent.com/camenduru/stable-diffusion-webui-scripts/main/run_n_times.py",r"-O",str(user_home / 
r"stable-diffusion-webui" / r"scripts" / r"run_n_times.py")]).returncode == 0): - break -Gitclone(r"https://github.com/deforum-art/deforum-for-automatic1111-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"deforum-for-automatic1111-webui" )) -#Gitclone(r"https://github.com/AlUlkesh/stable-diffusion-webui-images-browser",str(user_home / r"stable-diffusion-webui" / r"extensions"/ r"stable-diffusion-webui-images-browser")) -Gitclone(r"https://github.com/camenduru/stable-diffusion-webui-huggingface",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-huggingface")) -Gitclone(r"https://github.com/camenduru/sd-civitai-browser",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-civitai-browser")) -Gitclone(r"https://github.com/kohya-ss/sd-webui-additional-networks",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks")) -Gitclone(r"https://github.com/Mikubill/sd-webui-controlnet",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-controlnet")) -Gitclone(r"https://github.com/fkunn1326/openpose-editor",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"openpose-editor")) -Gitclone(r"https://github.com/jexom/sd-webui-depth-lib",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-depth-lib")) -Gitclone(r"https://github.com/hnmr293/posex",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"posex")) -Gitclone(r"https://github.com/nonnonstop/sd-webui-3d-open-pose-editor",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-3d-open-pose-editor")) -#中文本地化的请解除下一行的注释 -#Gitclone(r"https://github.com/dtlnor/stable-diffusion-webui-localization-zh_CN.git",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-localization-zh_CN")) -Gitclone(r"https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git" , str(user_home / r"stable-diffusion-webui" / r"extensions" / r"a1111-sd-webui-tagcomplete")) -Gitclone(r"https://github.com/camenduru/sd-webui-tunnels",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-tunnels")) -Gitclone(r"https://github.com/etherealxx/batchlinks-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"batchlinks-webui")) -Gitclone(r"https://github.com/catppuccin/stable-diffusion-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-catppuccin")) - -#Gitclone(r"https://github.com/KohakuBueleaf/a1111-sd-webui-locon",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"a1111-sd-webui-locon" )) -Gitclone(r"https://github.com/AUTOMATIC1111/stable-diffusion-webui-rembg",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-rembg")) -Gitclone(r"https://github.com/ashen-sensored/stable-diffusion-webui-two-shot",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-two-shot")) -Gitclone(r"https://github.com/camenduru/sd_webui_stealth_pnginfo",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd_webui_stealth_pnginfo")) - -os.chdir(user_home / r"stable-diffusion-webui") - -#download ControlNet models -print("extensions dolwnload done .\ndownloading ControlNet models") -dList =[r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11e_sd15_ip2p_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11e_sd15_shuffle_fp16.safetensors", - 
r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_canny_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_inpaint_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_mlsd_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_normalbae_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_openpose_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_scribble_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_seg_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_softedge_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11e_sd15_ip2p_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11e_sd15_shuffle_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_canny_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11f1p_sd15_depth_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_inpaint_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_lineart_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_mlsd_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_normalbae_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_openpose_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_scribble_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_seg_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_softedge_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15s2_lineart_anime_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11f1e_sd15_tile_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_style_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_sketch_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_seg_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_openpose_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_keypose_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_depth_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_canny_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_canny_sd15v2.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_depth_sd15v2.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_sketch_sd15v2.pth", - 
r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_zoedepth_sd15v1.pth"] -for i in range(0,len(dList)): DownLoad(dList[i],str(user_home / "stable-diffusion-webui" / "extensions" / "sd-webui-controlnet" / "models"),pathlib.Path(dList[i]).name) -del dList - -#download model -#you can change model download address here -print("ControlNet models download done.\ndownloading model") -DownLoad(r"https://huggingface.co/iZELX1/Grapefruit/resolve/main/lemon.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"lemon.safetensors") - -#DownLoad(r"https://huggingface.co/ckpt/anything-v4.0/resolve/main/anything-v4.5-pruned.ckpt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"anything-v4.5-pruned.ckpt") -#DownLoad(r"https://huggingface.co/ckpt/anything-v4.0/resolve/main/anything-v4.0.vae.pt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"anything-v4.0.vae.pt") -#DownLoad(r"https://huggingface.co/gsdf/Counterfeit-V3.0/resolve/main/Counterfeit-V3.0_fp16.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"Counterfeit-V3.0_fp16.safetensors") -#DownLoad(r"https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A1B_orangemixs.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"AOM3A1B_orangemixs.safetensors") -#DownLoad(r"https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/VAEs/orangemix.vae.pt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"orangemix.vae.pt") -#DownLoad(r"https://huggingface.co/Meina/MeinaPastel/resolve/main/MeinaPastelV5%20-%20Baked%20VAE.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"MeinaPastelV5_BakedVAE.safetensors") -#DownLoad(r"https://huggingface.co/Meina/MeinaPastel/resolve/main/MeinaPastelV5%20-%20Without%20VAE.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"MeinaPastelV5_WithoutVAE.safetensors") -#DownLoad(r"https://civitai.com/api/download/models/9474",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"chilloutmix_NiPrunedFp16.safetensors") - -DownLoad(r"https://civitai.com/api/download/models/39885",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"Better_light.safetensors") -DownLoad(r"https://civitai.com/api/download/models/21065",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"LAS.safetensors") -DownLoad(r"https://civitai.com/api/download/models/39164",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"backlighting.safetensors") -#strt webui - -print("Done\nStarting Webui...") -os.chdir(user_home / r"stable-diffusion-webui") -while True: - ret=subprocess.run([r"python3" ,r"launch.py",r"--precision",r"full",r"--no-half",r"--no-half-vae",r"--enable-insecure-extension-access",r"--medvram",r"--skip-torch-cuda-test",r"--enable-console-prompts",r"--ui-settings-file="+str(pathlib.Path(__file__).parent /r"config.json")]) - if(ret.returncode == 0 ): - del ret - gc.collect() - else : - del ret - -del os ,user_home ,pyexecutable ,subprocess \ No newline at end of file diff --git a/spaces/Kangarroar/ApplioRVC-Inference/extract_locale.py b/spaces/Kangarroar/ApplioRVC-Inference/extract_locale.py deleted file 
mode 100644 index a4ff5ea3ddd7c612c640544099ab98a861b8fe35..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/ApplioRVC-Inference/extract_locale.py +++ /dev/null @@ -1,34 +0,0 @@ -import json -import re - -# Define regular expression patterns -pattern = r"""i18n\([\s\n\t]*(["'][^"']+["'])[\s\n\t]*\)""" - -# Initialize the dictionary to store key-value pairs -data = {} - - -def process(fn: str): - global data - with open(fn, "r", encoding="utf-8") as f: - contents = f.read() - matches = re.findall(pattern, contents) - for key in matches: - key = eval(key) - print("extract:", key) - data[key] = key - - -print("processing infer-web.py") -process("infer-web.py") - -print("processing gui_v0.py") -process("gui_v0.py") - -print("processing gui_v1.py") -process("gui_v1.py") - -# Save as a JSON file -with open("./i18n/en_US.json", "w", encoding="utf-8") as f: - json.dump(data, f, ensure_ascii=False, indent=4) - f.write("\n") diff --git a/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/layers_537227KB.py b/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/layers_537227KB.py deleted file mode 100644 index a38b7bb3ae3136b07eadfc2db445fef4c2de186b..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/layers_537227KB.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from . import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) 
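# Shape sketch for the blocks above: with the defaults ksize=3, stride=1, pad=1,
# Conv2DBNActiv and SeperableConv2DBNActiv keep the spatial size and only change
# the channel count; Encoder downsamples only when given stride=2. Sizes are arbitrary.
import torch

x = torch.randn(1, 16, 32, 32)
print(Conv2DBNActiv(16, 32)(x).shape)            # torch.Size([1, 32, 32, 32])
print(SeperableConv2DBNActiv(16, 32)(x).shape)   # torch.Size([1, 32, 32, 32])

h, skip = Encoder(16, 32, ksize=3, stride=2, pad=1)(x)
print(h.shape, skip.shape)    # torch.Size([1, 32, 16, 16]) torch.Size([1, 32, 32, 32])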
- self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv6 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv7 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - feat6 = self.conv6(x) - feat7 = self.conv7(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/__init__.py b/spaces/KyanChen/RSPrompter/mmdet/models/backbones/__init__.py deleted file mode 100644 index e16ff85f7037b36fb2046fcbcd3af523050a6516..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .csp_darknet import CSPDarknet -from .cspnext import CSPNeXt -from .darknet import Darknet -from .detectors_resnet import DetectoRS_ResNet -from .detectors_resnext import DetectoRS_ResNeXt -from .efficientnet import EfficientNet -from .hourglass import HourglassNet -from .hrnet import HRNet -from .mobilenet_v2 import MobileNetV2 -from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2 -from .regnet import RegNet -from .res2net import Res2Net -from .resnest import ResNeSt -from .resnet import ResNet, ResNetV1d -from .resnext import ResNeXt -from .ssd_vgg import SSDVGG -from .swin import SwinTransformer -from .trident_resnet import TridentResNet - -__all__ = [ - 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', - 'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet', - 'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet', - 'SwinTransformer', 'PyramidVisionTransformer', - 'PyramidVisionTransformerV2', 'EfficientNet', 'CSPNeXt' -] diff --git a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/modules/F0Predictor/PMF0Predictor.py b/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/modules/F0Predictor/PMF0Predictor.py deleted file mode 100644 index 29b2d78eec2b4de5e617a21120abd5fb5a716ee5..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Aesthetic_RVC_Inference_HF/lib/infer/infer_pack/modules/F0Predictor/PMF0Predictor.py +++ /dev/null @@ -1,97 +0,0 @@ -from lib.infer.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import parselmouth -import numpy as np - - -class PMF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = 
data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def compute_f0(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0 - - def compute_f0_uv(self, wav, p_len=None): - x = wav - if p_len is None: - p_len = x.shape[0] // self.hop_length - else: - assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error" - time_step = self.hop_length / self.sampling_rate * 1000 - f0 = ( - parselmouth.Sound(x, self.sampling_rate) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=self.f0_min, - pitch_ceiling=self.f0_max, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant") - f0, uv = self.interpolate_f0(f0) - return f0, uv diff --git a/spaces/Lianjd/stock_dashboard/backtrader/analyzers/calmar.py b/spaces/Lianjd/stock_dashboard/backtrader/analyzers/calmar.py deleted file mode 100644 index ec4849285c3fb1ee5ccad6ffd220787108482150..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/analyzers/calmar.py +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -import backtrader as bt -from . 
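# Usage sketch for PMF0Predictor above (assumes the praat-parselmouth package is
# installed; the 220 Hz test tone and one-second length are arbitrary).
import numpy as np

sr = 44100
t = np.arange(sr) / sr
wav = 0.5 * np.sin(2 * np.pi * 220.0 * t)

predictor = PMF0Predictor(hop_length=512, f0_min=50, f0_max=1100, sampling_rate=sr)
f0, uv = predictor.compute_f0_uv(wav)     # one value per hop plus voiced/unvoiced flags
print(float(np.median(f0[uv > 0])))       # close to 220 on the voiced frames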
import TimeDrawDown - - -__all__ = ['Calmar'] - - -class Calmar(bt.TimeFrameAnalyzerBase): - '''This analyzer calculates the CalmarRatio - timeframe which can be different from the one used in the underlying data - Params: - - - ``timeframe`` (default: ``None``) - If ``None`` the ``timeframe`` of the 1st data in the system will be - used - - Pass ``TimeFrame.NoTimeFrame`` to consider the entire dataset with no - time constraints - - - ``compression`` (default: ``None``) - - Only used for sub-day timeframes to for example work on an hourly - timeframe by specifying "TimeFrame.Minutes" and 60 as compression - - If ``None`` then the compression of the 1st data of the system will be - used - - *None* - - - ``fund`` (default: ``None``) - - If ``None`` the actual mode of the broker (fundmode - True/False) will - be autodetected to decide if the returns are based on the total net - asset value or on the fund value. See ``set_fundmode`` in the broker - documentation - - Set it to ``True`` or ``False`` for a specific behavior - - See also: - - - https://en.wikipedia.org/wiki/Calmar_ratio - - Methods: - - ``get_analysis`` - - Returns a OrderedDict with a key for the time period and the - corresponding rolling Calmar ratio - - Attributes: - - ``calmar`` the latest calculated calmar ratio - ''' - - packages = ('collections', 'math',) - - params = ( - ('timeframe', bt.TimeFrame.Months), # default in calmar - ('period', 36), - ('fund', None), - ) - - def __init__(self): - self._maxdd = TimeDrawDown(timeframe=self.p.timeframe, - compression=self.p.compression) - - def start(self): - self._mdd = float('-inf') - self._values = collections.deque([float('Nan')] * self.p.period, - maxlen=self.p.period) - if self.p.fund is None: - self._fundmode = self.strategy.broker.fundmode - else: - self._fundmode = self.p.fund - - if not self._fundmode: - self._values.append(self.strategy.broker.getvalue()) - else: - self._values.append(self.strategy.broker.fundvalue) - - def on_dt_over(self): - self._mdd = max(self._mdd, self._maxdd.maxdd) - if not self._fundmode: - self._values.append(self.strategy.broker.getvalue()) - else: - self._values.append(self.strategy.broker.fundvalue) - rann = math.log(self._values[-1] / self._values[0]) / len(self._values) - self.calmar = calmar = rann / (self._mdd or float('Inf')) - - self.rets[self.dtkey] = calmar - - def stop(self): - self.on_dt_over() # update last values diff --git a/spaces/Libra7578/Image-to-video/README.md b/spaces/Libra7578/Image-to-video/README.md deleted file mode 100644 index 17b1d6148c5d9f28e1aa878788a4ab39a2e0b3e9..0000000000000000000000000000000000000000 --- a/spaces/Libra7578/Image-to-video/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image To Video -emoji: 🎬 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.24.1 -app_file: app.py -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/LightChen2333/OpenSLU/model/decoder/agif_decoder.py b/spaces/LightChen2333/OpenSLU/model/decoder/agif_decoder.py deleted file mode 100644 index a99ad55ae3bda76cd858f8567d48dcd5f41d9946..0000000000000000000000000000000000000000 --- a/spaces/LightChen2333/OpenSLU/model/decoder/agif_decoder.py +++ /dev/null @@ -1,16 +0,0 @@ -from common.utils import HiddenData, OutputData -from model.decoder.base_decoder import BaseDecoder - - -class AGIFDecoder(BaseDecoder): - def forward(self, hidden: HiddenData, **kwargs): - # hidden = 
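# Numeric sketch of the rolling computation in on_dt_over() above, detached from
# backtrader: log return over the stored window divided by the max drawdown seen
# so far. All numbers are made up.
import math

window = [100_000.0, 102_000.0, 101_000.0, 106_000.0, 110_000.0]   # stand-in for self._values
max_drawdown = 0.98                                                # stand-in for self._mdd

rann = math.log(window[-1] / window[0]) / len(window)
calmar = rann / (max_drawdown or float('Inf'))
print(round(calmar, 6))   # 0.019451 with the made-up numbers above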
self.interaction(hidden) - pred_intent = self.intent_classifier(hidden) - intent_index = self.intent_classifier.decode(OutputData(pred_intent, None), - return_list=False, - return_sentence_level=True) - interact_args = {"intent_index": intent_index, - "batch_size": pred_intent.classifier_output.shape[0], - "intent_label_num": self.intent_classifier.config["intent_label_num"]} - pred_slot = self.slot_classifier(hidden, internal_interaction=self.interaction, **interact_args) - return OutputData(pred_intent, pred_slot) diff --git a/spaces/LightSY/W2L-TD/facelib/face_api.py b/spaces/LightSY/W2L-TD/facelib/face_api.py deleted file mode 100644 index d8b103b107ca580d9dcbc4947c21883dff51bc7f..0000000000000000000000000000000000000000 --- a/spaces/LightSY/W2L-TD/facelib/face_api.py +++ /dev/null @@ -1,173 +0,0 @@ -import numpy as np - - -# batched detection -from PIL import Image -import cv2 - -def batched_transform(self, frames, use_origin_size): - """ - Arguments: - frames: a list of PIL.Image, or torch.Tensor(shape=[n, h, w, c], - type=np.float32, BGR format). - use_origin_size: whether to use origin size. - """ - from_PIL = True if isinstance(frames[0], Image.Image) else False - - # convert to opencv format - if from_PIL: - frames = [cv2.cvtColor(np.asarray(frame), cv2.COLOR_RGB2BGR) for frame in frames] - frames = np.asarray(frames, dtype=np.float32) - - # testing scale - im_size_min = np.min(frames[0].shape[0:2]) - im_size_max = np.max(frames[0].shape[0:2]) - resize = float(self.target_size) / float(im_size_min) - - # prevent bigger axis from being more than max_size - if np.round(resize * im_size_max) > self.max_size: - resize = float(self.max_size) / float(im_size_max) - resize = 1 if use_origin_size else resize - - # resize - if resize != 1: - if not from_PIL: - frames = F.interpolate(frames, scale_factor=resize) - else: - frames = [ - cv2.resize(frame, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR) - for frame in frames - ] - - # convert to torch.tensor format - # if not from_PIL: - # frames = frames.transpose(1, 2).transpose(1, 3).contiguous() - # else: - # frames = frames.transpose((0, 3, 1, 2)) - # frames = torch.from_numpy(frames) - frames = frames.transpose((0, 3, 1, 2)) - # frames = torch.from_numpy(frames) - - return frames, resize -def __detect_faces(inputs): - # get scale - height, width = inputs.shape[2:] - # self.scale = torch.tensor([width, height, width, height], dtype=torch.float32).to(device) - scale = np.array([width, height, width, height], dtype=np.float32) - tmp = [width, height, width, height, width, height, width, height, width, height] - # self.scale1 = torch.tensor(tmp, dtype=torch.float32).to(device) - scale1 = np.array(tmp, dtype=np.float32) - - # forawrd - # inputs = inputs.to(device) - inputs = inputs - # if self.half_inference: - # inputs = inputs.half() - loc, conf, landmarks = self(inputs) - - # get priorbox - priorbox = PriorBox(self.cfg, image_size=inputs.shape[2:]) - # priors = priorbox.forward().to(device) - priors = priorbox.forward() - - return loc, conf, landmarks, priors - -# def batch_detect(net, imgs, device): -def batch_detect(frames, conf_threshold = 0.8, nms_threshold = 0.4, use_origin_size = True): - - frames, resize = batched_transform(frames, use_origin_size) - frames = frames - frames = frames - np.array([104, 117, 123]) - - b_loc, b_conf, b_landmarks, priors = self.__detect_faces(frames) - - final_bounding_boxes, final_landmarks = [], [] - - # decode - priors = priors.unsqueeze(0) - b_loc = batched_decode(b_loc, priors, 
self.cfg['variance']) * self.scale / self.resize - # b_landmarks = batched_decode_landm(b_landmarks, priors, self.cfg['variance']) * self.scale1 / self.resize - b_conf = b_conf[:, :, 1] - - # index for selection - b_indice = b_conf > conf_threshold - - # concat - b_loc_and_conf = torch.cat((b_loc, b_conf.unsqueeze(-1)), dim=2).float() - - for pred, landm, inds in zip(b_loc_and_conf, b_landmarks, b_indice): - - # ignore low scores - # pred, landm = pred[inds, :], landm[inds, :] - pred = pred[inds, :] - if pred.shape[0] == 0: - final_bounding_boxes.append(np.array([], dtype=np.float32)) - # final_landmarks.append(np.array([], dtype=np.float32)) - continue - - # to CPU - # bounding_boxes, landm = pred.cpu().numpy(), landm.cpu().numpy() #原本 - # bounding_boxes, landm = pred.cpu().detach().numpy(), landm.cpu().detach().numpy() - bounding_boxes = pred.cpu().detach().numpy() - - # NMS - keep = py_cpu_nms(bounding_boxes, nms_threshold) - # bounding_boxes, landmarks = bounding_boxes[keep, :], landm[keep] - bounding_boxes = bounding_boxes[keep, :] - - # append - d = bounding_boxes[0] - d = np.clip(d, 0, None) - x1, y1, x2, y2 = map(int, d[:-1]) - final_bounding_boxes.append((x1, y1, x2, y2)) - # final_bounding_boxes.append(bounding_boxes) - # final_landmarks.append(landmarks) - # self.t['forward_pass'].toc(average=True) - # self.batch_time += self.t['forward_pass'].diff - # self.total_frame += len(frames) - # print(self.batch_time / self.total_frame) - - return final_bounding_boxes - - - imgs = imgs - np.array([104, 117, 123]) - imgs = imgs.transpose(0, 3, 1, 2) - imgs = np.array(imgs, dtype=np.float32) - # if 'cuda' in device: - # torch.backends.cudnn.benchmark = True - - # imgs = torch.from_numpy(imgs).float().to(device) - BB, CC, HH, WW = imgs.shape - # with torch.no_grad(): - # olist = net(imgs) - olist = net.run(None, {'img': imgs}) - - bboxlist = [] - for i in range(len(olist) // 2): - olist[i * 2] = softmax(olist[i * 2], axis=1) - # olist = [oelem.data.cpu() for oelem in olist] - for i in range(len(olist) // 2): - ocls, oreg = olist[i * 2], olist[i * 2 + 1] - # FB, FC, FH, FW = ocls.size() # feature map size - FB, FC, FH, FW = ocls.shape - stride = 2**(i + 2) # 4,8,16,32,64,128 - anchor = stride * 4 - poss = zip(*np.where(ocls[:, 1, :, :] > 0.05)) - for Iindex, hindex, windex in poss: - axc, ayc = stride / 2 + windex * stride, stride / 2 + hindex * stride - score = ocls[:, 1, hindex, windex] - loc = oreg[:, :, hindex, windex].reshape(BB, 1, 4) - priors = np.array([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]]) - priors = priors.reshape(1, 1, 4) - # priors = torch.Tensor([[axc / 1.0, ayc / 1.0, stride * 4 / 1.0, stride * 4 / 1.0]]).view(1, 1, 4) - variances = [0.1, 0.2] - box = batch_decode(loc, priors, variances) - box = box[:, 0] * 1.0 - # cv2.rectangle(imgshow,(int(x1),int(y1)),(int(x2),int(y2)),(0,0,255),1) - score = np.expand_dims(score,axis=1) - bboxlist.append(np.concatenate([box, score], 1)) - bboxlist = np.array(bboxlist) - if 0 == len(bboxlist): - bboxlist = np.zeros((1, BB, 5)) - - return bboxlist \ No newline at end of file diff --git a/spaces/Luelll/ChuanhuChatGPT/locale/extract_locale.py b/spaces/Luelll/ChuanhuChatGPT/locale/extract_locale.py deleted file mode 100644 index 32b0924bd6dffe150cb3e481ddadef836b91b83c..0000000000000000000000000000000000000000 --- a/spaces/Luelll/ChuanhuChatGPT/locale/extract_locale.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import json -import re - -# Define regular expression patterns -pattern = r'i18n\((\"{3}.*?\"{3}|\".*?\")\)' - -# 
Load the .py file -with open('ChuanhuChatbot.py', 'r', encoding='utf-8') as f: - contents = f.read() - -# Load the .py files in the modules folder -for filename in os.listdir("modules"): - if filename.endswith(".py"): - with open(os.path.join("modules", filename), "r", encoding="utf-8") as f: - contents += f.read() - -# Matching with regular expressions -matches = re.findall(pattern, contents, re.DOTALL) - -# Convert to key/value pairs -data = {match.strip('()"'): '' for match in matches} - -# Save as a JSON file -with open('labels.json', 'w', encoding='utf-8') as f: - json.dump(data, f, ensure_ascii=False, indent=4) \ No newline at end of file diff --git a/spaces/MLVKU/Human_Object_Interaction/configs/hico_train.sh b/spaces/MLVKU/Human_Object_Interaction/configs/hico_train.sh deleted file mode 100644 index c3b9794be0f3ceb12d4d0e1185074b57aaba8a49..0000000000000000000000000000000000000000 --- a/spaces/MLVKU/Human_Object_Interaction/configs/hico_train.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash - -set -x - -EXP_DIR=logs_run_001 -PY_ARGS=${@:1} - -python -u main.py \ - --project_name CPC_HOTR_HICODET \ - --run_name ${EXP_DIR} \ - --HOIDet \ - --validate \ - --share_enc \ - --pretrained_dec \ - --use_consis \ - --share_dec_param \ - --epochs 90 \ - --lr_drop 60 \ - --lr 1e-4 \ - --lr_backbone 1e-5 \ - --ramp_up_epoch 30 \ - --path_id 0 \ - --num_hoi_queries 16 \ - --set_cost_idx 20 \ - --hoi_idx_loss_coef 1 \ - --hoi_act_loss_coef 10 \ - --backbone resnet50 \ - --hoi_consistency_loss_coef 0.5 \ - --hoi_idx_consistency_loss_coef 1 \ - --hoi_act_consistency_loss_coef 2 \ - --stop_grad_stage \ - --hoi_eos_coef 0.1 \ - --temperature 0.2 \ - --no_aux_loss \ - --hoi_aux_loss \ - --dataset_file hico-det \ - --data_path hico_20160224_det \ - --frozen_weights https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth \ - --output_dir checkpoints/hico_det/ \ - --augpath_name [\'p2\',\'p3\',\'p4\'] \ - ${PY_ARGS} \ No newline at end of file diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/configs/_base_/schedules/schedule_80k.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/configs/_base_/schedules/schedule_80k.py deleted file mode 100644 index c190cee6bdc7922b688ea75dc8f152fa15c24617..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/configs/_base_/schedules/schedule_80k.py +++ /dev/null @@ -1,9 +0,0 @@ -# optimizer -optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) -optimizer_config = dict() -# learning policy -lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) -# runtime settings -runner = dict(type='IterBasedRunner', max_iters=80000) -checkpoint_config = dict(by_epoch=False, interval=8000) -evaluation = dict(interval=8000, metric='mIoU') diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/cc_attention.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/cc_attention.py deleted file mode 100644 index 9207aa95e6730bd9b3362dee612059a5f0ce1c5e..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmcv/ops/cc_attention.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F - -from annotator.uniformer.mmcv.cnn import PLUGIN_LAYERS, Scale - - -def NEG_INF_DIAG(n, device): - """Returns a diagonal matrix of size [n, n]. - - The diagonal are all "-inf". 
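# Mini demonstration of the extraction regex above: with re.DOTALL it captures
# both plain and triple-quoted i18n("...") literals from source text.
import re

pattern = r'i18n\((\"{3}.*?\"{3}|\".*?\")\)'
source = 'btn = i18n("Submit")\nmsg = i18n("""multi\nline""")'
print(re.findall(pattern, source, re.DOTALL))
# ['"Submit"', '"""multi\nline"""']  -> strip('()"') then yields the bare keys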
This is for avoiding calculating the - overlapped element in the Criss-Cross twice. - """ - return torch.diag(torch.tensor(float('-inf')).to(device).repeat(n), 0) - - -@PLUGIN_LAYERS.register_module() -class CrissCrossAttention(nn.Module): - """Criss-Cross Attention Module. - - .. note:: - Before v1.3.13, we use a CUDA op. Since v1.3.13, we switch - to a pure PyTorch and equivalent implementation. For more - details, please refer to https://github.com/open-mmlab/mmcv/pull/1201. - - Speed comparison for one forward pass - - - Input size: [2,512,97,97] - - Device: 1 NVIDIA GeForce RTX 2080 Ti - - +-----------------------+---------------+------------+---------------+ - | |PyTorch version|CUDA version|Relative speed | - +=======================+===============+============+===============+ - |with torch.no_grad() |0.00554402 s |0.0299619 s |5.4x | - +-----------------------+---------------+------------+---------------+ - |no with torch.no_grad()|0.00562803 s |0.0301349 s |5.4x | - +-----------------------+---------------+------------+---------------+ - - Args: - in_channels (int): Channels of the input feature map. - """ - - def __init__(self, in_channels): - super().__init__() - self.query_conv = nn.Conv2d(in_channels, in_channels // 8, 1) - self.key_conv = nn.Conv2d(in_channels, in_channels // 8, 1) - self.value_conv = nn.Conv2d(in_channels, in_channels, 1) - self.gamma = Scale(0.) - self.in_channels = in_channels - - def forward(self, x): - """forward function of Criss-Cross Attention. - - Args: - x (Tensor): Input feature. \ - shape (batch_size, in_channels, height, width) - Returns: - Tensor: Output of the layer, with shape of \ - (batch_size, in_channels, height, width) - """ - B, C, H, W = x.size() - query = self.query_conv(x) - key = self.key_conv(x) - value = self.value_conv(x) - energy_H = torch.einsum('bchw,bciw->bwhi', query, key) + NEG_INF_DIAG( - H, query.device) - energy_H = energy_H.transpose(1, 2) - energy_W = torch.einsum('bchw,bchj->bhwj', query, key) - attn = F.softmax( - torch.cat([energy_H, energy_W], dim=-1), dim=-1) # [B,H,W,(H+W)] - out = torch.einsum('bciw,bhwi->bchw', value, attn[..., :H]) - out += torch.einsum('bchj,bhwj->bchw', value, attn[..., H:]) - - out = self.gamma(out) + x - out = out.contiguous() - - return out - - def __repr__(self): - s = self.__class__.__name__ - s += f'(in_channels={self.in_channels})' - return s diff --git a/spaces/MetaWabbit/Auto-GPT/autogpt/config/singleton.py b/spaces/MetaWabbit/Auto-GPT/autogpt/config/singleton.py deleted file mode 100644 index 55b2aeea120bbe51ca837265fcb7fbff467e55f2..0000000000000000000000000000000000000000 --- a/spaces/MetaWabbit/Auto-GPT/autogpt/config/singleton.py +++ /dev/null @@ -1,24 +0,0 @@ -"""The singleton metaclass for ensuring only one instance of a class.""" -import abc - - -class Singleton(abc.ABCMeta, type): - """ - Singleton metaclass for ensuring only one instance of a class. - """ - - _instances = {} - - def __call__(cls, *args, **kwargs): - """Call method for the singleton metaclass.""" - if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) - return cls._instances[cls] - - -class AbstractSingleton(abc.ABC, metaclass=Singleton): - """ - Abstract singleton class for ensuring only one instance of a class. 
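# Shape sketch for CrissCrossAttention above: the module is a drop-in residual
# block, so the output keeps the input's (B, C, H, W) shape. Sizes are arbitrary.
import torch

cca = CrissCrossAttention(in_channels=64)
x = torch.randn(2, 64, 17, 23)
with torch.no_grad():
    out = cca(x)
print(out.shape)   # torch.Size([2, 64, 17, 23])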
- """ - - pass diff --git a/spaces/MetaWabbit/Auto-GPT/tests/unit/test_commands.py b/spaces/MetaWabbit/Auto-GPT/tests/unit/test_commands.py deleted file mode 100644 index ecbac9b73bd9ad872931d77e144dd853b3d8ef64..0000000000000000000000000000000000000000 --- a/spaces/MetaWabbit/Auto-GPT/tests/unit/test_commands.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Unit tests for the commands module""" -from unittest.mock import MagicMock, patch - -import pytest - -import autogpt.agent.agent_manager as agent_manager -from autogpt.app import execute_command, list_agents, start_agent - - -@pytest.mark.integration_test -def test_make_agent() -> None: - """Test the make_agent command""" - with patch("openai.ChatCompletion.create") as mock: - obj = MagicMock() - obj.response.choices[0].messages[0].content = "Test message" - mock.return_value = obj - start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2") - agents = list_agents() - assert "List of agents:\n0: chat" == agents - start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2") - agents = list_agents() - assert "List of agents:\n0: chat\n1: write" == agents diff --git a/spaces/MohammedAlakhras/Telegram_API/app.py b/spaces/MohammedAlakhras/Telegram_API/app.py deleted file mode 100644 index 139f1b8166a34939a01e92d5b6148364b006622b..0000000000000000000000000000000000000000 --- a/spaces/MohammedAlakhras/Telegram_API/app.py +++ /dev/null @@ -1,115 +0,0 @@ -import gradio as gr -from telethon.sync import TelegramClient, events -import datetime -import socks -import time -import os -import requests -import random - - - - - - - - - -def download_file(url, filename): - response = requests.get(url) - with open(filename, 'wb') as file: - file.write(response.content) - - -RndMssgs = [ - "I've received your message. I'll get back to you as soon as possible.", - "I appreciate your patience and understanding.", - "Your message has been received. I'll respond shortly.", - "I'm currently unavailable, but I'll reply as soon as I can.", - "Your message has been received and it's important to us.", - "I'm away at the moment. Rest assured, I'll respond to your message soon.", - "Your message is received. Thank you for your understanding.", - "I've received your message and will respond at my earliest convenience.", - "I've received your message. I'll make sure to reply as soon as possible.", - "I'm currently out offline, but I'll get back to you shortly.", - "I've received your message. I'll get back to you soon.", - "I'm not available right now, but I've received your message.", - "Thank you for contacting me. I'll respond as soon as I can.", - "I'm currently busy, but I'll make sure to reply to your message as soon as possible.", - "Your message has been received. I'll get back to you shortly.", - "Thank you for your patience. I'll respond as soon as I can.", - "I've received your message and will get back to you shortly.", - "Your message has been received. Thank you for waiting.", - "I'm currently on a break, but I've received your message and will respond soon." 
-] - - - - - -proxy_server = '142.93.68.63' -proxy_port = 2434 -proxy_secret = 'ee32b920dffb51643028e2f6b878d4eac1666172616b61762e636f6d' -proxy_dc_id = 2 # This is usually 2 for MTProto proxies - -proxy = ( - socks.SOCKS5, - proxy_server, - proxy_port, - True, - 'vpn', - 'unlimited' -) - -api_id=os.environ['apiID'] -api_hash=os.environ['apiHash'] -phone=os.environ['phone'] -username=os.environ['username'] - -serssionFile=os.environ['sessionUrlFile'] - -download_file(serssionFile, 'anon.session') - - - -# Dictionary to track the times when senders were last replied to -reply_times = {} - -async def main(): - async with TelegramClient('anon', api_id, api_hash) as client: - @client.on(events.NewMessage()) - async def my_event_handler(event): - sender = await event.get_sender() - sender_id = sender.id - sender_name = sender.first_name - chat = await event.get_chat() - chat_id = chat.id - text = event.raw_text - - # Personal message - if chat_id == sender_id and not sender.bot: - # Check the last reply to this sender - last_reply_time = reply_times.get(str(sender_id), None) - if last_reply_time is None or time.time() - last_reply_time > 60*60*6: # reply only if not replied in the last minute - response = f'Hello {sender_name} 👋🏻 ,\n {random.choice(RndMssgs)} 😊' - await client.send_message(chat_id, response, parse_mode='HTML') - reply_times[str(sender_id)] = time.time() # update the last reply time - - # Group message - elif username in text: - last_reply_time = reply_times.get(str(str(chat_id)+str(sender_id)), None) - if last_reply_time is None or time.time() - last_reply_time > 60*5: - response = f'Hello {sender_name} @ {chat.title} 👋🏻,\n {random.choice(RndMssgs)} 😊' - await client.send_message(chat_id, response, parse_mode='HTML') - reply_times[str(str(chat_id)+str(sender_id))] = time.time() - - - await client.run_until_disconnected() - - -# Gradio Inteface -inputs = [] -output = "text" -gr.Interface(fn=main, inputs=inputs, outputs=output).launch() - - # client.loop.run_until_complete(main()) \ No newline at end of file diff --git a/spaces/MultiAgentSystems/WhisperGPTMultiAgentSystems/README.md b/spaces/MultiAgentSystems/WhisperGPTMultiAgentSystems/README.md deleted file mode 100644 index e6ea0c7481d86560e929561bb6afe8e9217cabb8..0000000000000000000000000000000000000000 --- a/spaces/MultiAgentSystems/WhisperGPTMultiAgentSystems/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: WhisperGPTMultiAgentSystems -emoji: 📚 -colorFrom: green -colorTo: indigo -sdk: streamlit -sdk_version: 1.28.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/NAACL2022/CLIP-Caption-Reward/captioning/models/ShowTellModel.py b/spaces/NAACL2022/CLIP-Caption-Reward/captioning/models/ShowTellModel.py deleted file mode 100644 index 2f3463b64f988aa61d90838ddcf8ac89053c3377..0000000000000000000000000000000000000000 --- a/spaces/NAACL2022/CLIP-Caption-Reward/captioning/models/ShowTellModel.py +++ /dev/null @@ -1,174 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.autograd import * -from . 
import utils - -from .CaptionModel import CaptionModel - -class ShowTellModel(CaptionModel): - def __init__(self, opt): - super(ShowTellModel, self).__init__() - self.vocab_size = opt.vocab_size - self.input_encoding_size = opt.input_encoding_size - self.rnn_type = opt.rnn_type - self.rnn_size = opt.rnn_size - self.num_layers = opt.num_layers - self.drop_prob_lm = opt.drop_prob_lm - self.seq_length = opt.seq_length - self.fc_feat_size = opt.fc_feat_size - - self.ss_prob = 0.0 # Schedule sampling probability - - self.img_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size) - self.core = getattr(nn, self.rnn_type.upper())(self.input_encoding_size, self.rnn_size, self.num_layers, bias=False, dropout=self.drop_prob_lm) - self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size) - self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1) - self.dropout = nn.Dropout(self.drop_prob_lm) - - self.init_weights() - - def init_weights(self): - initrange = 0.1 - self.embed.weight.data.uniform_(-initrange, initrange) - self.logit.bias.data.fill_(0) - self.logit.weight.data.uniform_(-initrange, initrange) - - def init_hidden(self, bsz): - weight = self.logit.weight - if self.rnn_type == 'lstm': - return (weight.new_zeros(self.num_layers, bsz, self.rnn_size), - weight.new_zeros(self.num_layers, bsz, self.rnn_size)) - else: - return weight.new_zeros(self.num_layers, bsz, self.rnn_size) - - def _forward(self, fc_feats, att_feats, seq, att_masks=None): - batch_size = fc_feats.size(0) - seq_per_img = seq.shape[0] // batch_size - state = self.init_hidden(batch_size*seq_per_img) - outputs = [] - - if seq_per_img > 1: - fc_feats = utils.repeat_tensors(seq_per_img, fc_feats) - - for i in range(seq.size(1) + 1): - if i == 0: - xt = self.img_embed(fc_feats) - else: - if self.training and i >= 2 and self.ss_prob > 0.0: # otherwiste no need to sample - sample_prob = fc_feats.data.new(batch_size*seq_per_img).uniform_(0, 1) - sample_mask = sample_prob < self.ss_prob - if sample_mask.sum() == 0: - it = seq[:, i-1].clone() - else: - sample_ind = sample_mask.nonzero().view(-1) - it = seq[:, i-1].data.clone() - #prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1) - #it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1)) - prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1) - it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind)) - else: - it = seq[:, i-1].clone() - # break if all the sequences end - if i >= 2 and seq[:, i-1].data.sum() == 0: - break - xt = self.embed(it) - - output, state = self.core(xt.unsqueeze(0), state) - output = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1) - outputs.append(output) - - return torch.cat([_.unsqueeze(1) for _ in outputs[1:]], 1).contiguous() - - def get_logprobs_state(self, it, state): - # 'it' contains a word index - xt = self.embed(it) - - output, state = self.core(xt.unsqueeze(0), state) - logprobs = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1) - - return logprobs, state - - def _sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}): - beam_size = opt.get('beam_size', 10) - batch_size = fc_feats.size(0) - - assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. 
can be dealt with in future if needed' - seq = torch.LongTensor(self.seq_length, batch_size).zero_() - seqLogprobs = torch.FloatTensor(self.seq_length, batch_size) - # lets process every image independently for now, for simplicity - - self.done_beams = [[] for _ in range(batch_size)] - for k in range(batch_size): - state = self.init_hidden(beam_size) - for t in range(2): - if t == 0: - xt = self.img_embed(fc_feats[k:k+1]).expand(beam_size, self.input_encoding_size) - elif t == 1: # input - it = fc_feats.data.new(beam_size).long().zero_() - xt = self.embed(it) - - output, state = self.core(xt.unsqueeze(0), state) - logprobs = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1) - - self.done_beams[k] = self.beam_search(state, logprobs, opt=opt) - seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score - seqLogprobs[:, k] = self.done_beams[k][0]['logps'] - # return the samples and their log likelihoods - return seq.transpose(0, 1), seqLogprobs.transpose(0, 1) - - def _sample(self, fc_feats, att_feats, att_masks=None, opt={}): - sample_method = opt.get('sample_method', 'greedy') - beam_size = opt.get('beam_size', 1) - temperature = opt.get('temperature', 1.0) - if beam_size > 1 and sample_method in ['greedy', 'beam_search']: - return self.sample_beam(fc_feats, att_feats, opt) - - batch_size = fc_feats.size(0) - state = self.init_hidden(batch_size) - seq = fc_feats.new_zeros(batch_size, self.seq_length, dtype=torch.long) - seqLogprobs = fc_feats.new_zeros(batch_size, self.seq_length) - for t in range(self.seq_length + 2): - if t == 0: - xt = self.img_embed(fc_feats) - else: - if t == 1: # input - it = fc_feats.data.new(batch_size).long().zero_() - xt = self.embed(it) - - output, state = self.core(xt.unsqueeze(0), state) - logprobs = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1) - - # sample the next word - if t == self.seq_length + 1: # skip if we achieve maximum length - break - if sample_method == 'greedy': - sampleLogprobs, it = torch.max(logprobs.data, 1) - it = it.view(-1).long() - else: - if temperature == 1.0: - prob_prev = torch.exp(logprobs.data).cpu() # fetch prev distribution: shape Nx(M+1) - else: - # scale logprobs by temperature - prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu() - it = torch.multinomial(prob_prev, 1).to(logprobs.device) - sampleLogprobs = logprobs.gather(1, it) # gather the logprobs at sampled positions - it = it.view(-1).long() # and flatten indices for downstream processing - - if t >= 1: - # stop when all finished - if t == 1: - unfinished = it > 0 - else: - unfinished = unfinished & (it > 0) - it = it * unfinished.type_as(it) - seq[:,t-1] = it #seq[t] the input of t+2 time step - seqLogprobs[:,t-1] = sampleLogprobs.view(-1) - if unfinished.sum() == 0: - break - - return seq, seqLogprobs \ No newline at end of file diff --git a/spaces/NATSpeech/PortaSpeech/utils/text/text_encoder.py b/spaces/NATSpeech/PortaSpeech/utils/text/text_encoder.py deleted file mode 100644 index 09555af09720382a795712f0fdd9b711c5b19e02..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/PortaSpeech/utils/text/text_encoder.py +++ /dev/null @@ -1,263 +0,0 @@ -import json -import re -import six -from six.moves import range # pylint: disable=redefined-builtin - -PAD = "" -EOS = "" -UNK = "" -SEG = "|" -PUNCS = '!,.?;:' -RESERVED_TOKENS = [PAD, EOS, UNK] -NUM_RESERVED_TOKENS = len(RESERVED_TOKENS) -PAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0 -EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1 
-UNK_ID = RESERVED_TOKENS.index(UNK) # Normally 2 - -if six.PY2: - RESERVED_TOKENS_BYTES = RESERVED_TOKENS -else: - RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")] - -# Regular expression for unescaping token strings. -# '\u' is converted to '_' -# '\\' is converted to '\' -# '\213;' is converted to unichr(213) -_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);") -_ESCAPE_CHARS = set(u"\\_u;0123456789") - - -def strip_ids(ids, ids_to_strip): - """Strip ids_to_strip from the end ids.""" - ids = list(ids) - while ids and ids[-1] in ids_to_strip: - ids.pop() - return ids - - -class TextEncoder(object): - """Base class for converting from ints to/from human readable strings.""" - - def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS): - self._num_reserved_ids = num_reserved_ids - - @property - def num_reserved_ids(self): - return self._num_reserved_ids - - def encode(self, s): - """Transform a human-readable string into a sequence of int ids. - - The ids should be in the range [num_reserved_ids, vocab_size). Ids [0, - num_reserved_ids) are reserved. - - EOS is not appended. - - Args: - s: human-readable string to be converted. - - Returns: - ids: list of integers - """ - return [int(w) + self._num_reserved_ids for w in s.split()] - - def decode(self, ids, strip_extraneous=False): - """Transform a sequence of int ids into a human-readable string. - - EOS is not expected in ids. - - Args: - ids: list of integers to be converted. - strip_extraneous: bool, whether to strip off extraneous tokens - (EOS and PAD). - - Returns: - s: human-readable string. - """ - if strip_extraneous: - ids = strip_ids(ids, list(range(self._num_reserved_ids or 0))) - return " ".join(self.decode_list(ids)) - - def decode_list(self, ids): - """Transform a sequence of int ids into a their string versions. - - This method supports transforming individual input/output ids to their - string versions so that sequence to/from text conversions can be visualized - in a human readable format. - - Args: - ids: list of integers to be converted. - - Returns: - strs: list of human-readable string. - """ - decoded_ids = [] - for id_ in ids: - if 0 <= id_ < self._num_reserved_ids: - decoded_ids.append(RESERVED_TOKENS[int(id_)]) - else: - decoded_ids.append(id_ - self._num_reserved_ids) - return [str(d) for d in decoded_ids] - - @property - def vocab_size(self): - raise NotImplementedError() - - -class TokenTextEncoder(TextEncoder): - """Encoder based on a user-supplied vocabulary (file or list).""" - - def __init__(self, - vocab_filename, - reverse=False, - vocab_list=None, - replace_oov=None, - num_reserved_ids=NUM_RESERVED_TOKENS): - """Initialize from a file or list, one token per line. - - Handling of reserved tokens works as follows: - - When initializing from a list, we add reserved tokens to the vocab. - - When initializing from a file, we do not add reserved tokens to the vocab. - - When saving vocab files, we save reserved tokens to the file. - - Args: - vocab_filename: If not None, the full filename to read vocab from. If this - is not None, then vocab_list should be None. - reverse: Boolean indicating if tokens should be reversed during encoding - and decoding. - vocab_list: If not None, a list of elements of the vocabulary. If this is - not None, then vocab_filename should be None. - replace_oov: If not None, every out-of-vocabulary token seen when - encoding will be replaced by this string (which must be in vocab). - num_reserved_ids: Number of IDs to save for reserved tokens like . 
- """ - super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids) - self._reverse = reverse - self._replace_oov = replace_oov - if vocab_filename: - self._init_vocab_from_file(vocab_filename) - else: - assert vocab_list is not None - self._init_vocab_from_list(vocab_list) - self.pad_index = self.token_to_id[PAD] - self.eos_index = self.token_to_id[EOS] - self.unk_index = self.token_to_id[UNK] - self.seg_index = self.token_to_id[SEG] if SEG in self.token_to_id else self.eos_index - - def encode(self, s): - """Converts a space-separated string of tokens to a list of ids.""" - sentence = s - tokens = sentence.strip().split() - if self._replace_oov is not None: - tokens = [t if t in self.token_to_id else self._replace_oov - for t in tokens] - ret = [self.token_to_id[tok] for tok in tokens] - return ret[::-1] if self._reverse else ret - - def decode(self, ids, strip_eos=False, strip_padding=False): - if strip_padding and self.pad() in list(ids): - pad_pos = list(ids).index(self.pad()) - ids = ids[:pad_pos] - if strip_eos and self.eos() in list(ids): - eos_pos = list(ids).index(self.eos()) - ids = ids[:eos_pos] - return " ".join(self.decode_list(ids)) - - def decode_list(self, ids): - seq = reversed(ids) if self._reverse else ids - return [self._safe_id_to_token(i) for i in seq] - - @property - def vocab_size(self): - return len(self.id_to_token) - - def __len__(self): - return self.vocab_size - - def _safe_id_to_token(self, idx): - return self.id_to_token.get(idx, "ID_%d" % idx) - - def _init_vocab_from_file(self, filename): - """Load vocab from a file. - - Args: - filename: The file to load vocabulary from. - """ - with open(filename) as f: - tokens = [token.strip() for token in f.readlines()] - - def token_gen(): - for token in tokens: - yield token - - self._init_vocab(token_gen(), add_reserved_tokens=False) - - def _init_vocab_from_list(self, vocab_list): - """Initialize tokens from a list of tokens. - - It is ok if reserved tokens appear in the vocab list. They will be - removed. The set of tokens in vocab_list should be unique. - - Args: - vocab_list: A list of tokens. - """ - - def token_gen(): - for token in vocab_list: - if token not in RESERVED_TOKENS: - yield token - - self._init_vocab(token_gen()) - - def _init_vocab(self, token_generator, add_reserved_tokens=True): - """Initialize vocabulary with tokens from token_generator.""" - - self.id_to_token = {} - non_reserved_start_index = 0 - - if add_reserved_tokens: - self.id_to_token.update(enumerate(RESERVED_TOKENS)) - non_reserved_start_index = len(RESERVED_TOKENS) - - self.id_to_token.update( - enumerate(token_generator, start=non_reserved_start_index)) - - # _token_to_id is the reverse of _id_to_token - self.token_to_id = dict((v, k) for k, v in six.iteritems(self.id_to_token)) - - def pad(self): - return self.pad_index - - def eos(self): - return self.eos_index - - def unk(self): - return self.unk_index - - def seg(self): - return self.seg_index - - def store_to_file(self, filename): - """Write vocab file to disk. - - Vocab files have one token per line. The file ends in a newline. Reserved - tokens are written to the vocab file as well. - - Args: - filename: Full path of the file to store the vocab to. 
- """ - with open(filename, "w") as f: - for i in range(len(self.id_to_token)): - f.write(self.id_to_token[i] + "\n") - - def sil_phonemes(self): - return [p for p in self.id_to_token.values() if is_sil_phoneme(p)] - - -def build_token_encoder(token_list_file): - token_list = json.load(open(token_list_file)) - return TokenTextEncoder(None, vocab_list=token_list, replace_oov='') - - -def is_sil_phoneme(p): - return p == '' or not p[0].isalpha() diff --git a/spaces/NCTCMumbai/NCTC/models/research/brain_coder/single_task/ga_train.py b/spaces/NCTCMumbai/NCTC/models/research/brain_coder/single_task/ga_train.py deleted file mode 100644 index 630eca427e478dbadad58bd94b56e89a5a747526..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/brain_coder/single_task/ga_train.py +++ /dev/null @@ -1,324 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -"""Genetic algorithm for BF tasks. - -Also contains the uniform random search algorithm. - -Inspired by https://github.com/primaryobjects/AI-Programmer. -GA function code borrowed from https://github.com/DEAP/deap. -""" - -import cPickle -import os -import sys -from time import sleep - -from absl import flags -from absl import logging -import numpy as np -from six.moves import xrange -import tensorflow as tf - -from common import utils # brain coder -from single_task import data # brain coder -from single_task import defaults # brain coder -from single_task import ga_lib # brain coder -from single_task import results_lib # brain coder - -FLAGS = flags.FLAGS - - -def define_tuner_hparam_space(hparam_space_type): - """Define tunable hparams for grid search.""" - if hparam_space_type != 'ga': - raise ValueError('Hparam space is not valid: "%s"' % hparam_space_type) - return { - 'population_size': [10, 25, 50, 100, 500], - 'crossover_rate': [0.2, 0.5, 0.7, 0.9, 0.95], - 'mutation_rate': [0.01, 0.03, 0.05, 0.1, 0.15]} - - -def write_hparams_to_config(config, hparams, hparam_space_type): - """Write hparams given by the tuner into the Config object.""" - if hparam_space_type != 'ga': - raise ValueError('Hparam space is not valid: "%s"' % hparam_space_type) - config.batch_size = hparams.population_size - config.agent.crossover_rate = hparams.crossover_rate - config.agent.mutation_rate = hparams.mutation_rate - - -class CheckpointWriter(object): - """Manages loading and saving GA populations to disk. - - This object is used by the genetic algorithm to save progress periodically - so that a recent population can be loaded from disk in the event of a restart. - """ - - def __init__(self, checkpoint_dir, population_size): - self.checkpoint_file = os.path.join(checkpoint_dir, 'checkpoint.pickle') - self.population_size = population_size - - def write(self, gen, population, halloffame): - """Write GA state to disk. - - Overwrites previous saved state. - - Args: - gen: Generation number. - population: List of Individual objects. - halloffame: Hall-of-fame buffer. Typically a priority queue. - """ - raw = cPickle.dumps((gen, population, halloffame)) - with tf.gfile.FastGFile(self.checkpoint_file, 'w') as f: - f.write(raw) - - def load(self): - """Loads GA state from disk. - - Loads whatever is on disk, which will be whatever the most recent call - to `write` wrote. - - Returns: - gen: Generation number. - population: List of Individual objects. - halloffame: Hall-of-fame buffer. Typically a priority queue. 
- """ - with tf.gfile.FastGFile(self.checkpoint_file, 'r') as f: - raw = f.read() - objs = cPickle.loads(raw) - # Validate data. - assert isinstance(objs, tuple) and len(objs) == 3, ( - 'Expecting a 3-tuple, but got %s instead.' % (objs,)) - gen, population, halloffame = objs - assert isinstance(gen, int), ( - 'Expecting `gen` to be an integer, got %s' % (gen,)) - assert ( - isinstance(population, list) - and len(population) == self.population_size - ), ( - 'Expecting `population` to be a list with size %d, got %s' - % (self.population_size, population)) - assert halloffame is None or len(halloffame) == 2, ( - 'Expecting hall-of-fame object to have length two, got length %d' - % len(halloffame)) - logging.info('Loaded pop from checkpoint file: "%s".', - self.checkpoint_file) - return gen, population, halloffame - - def has_checkpoint(self): - """Checks if a checkpoint exists on disk, and if so returns True.""" - return tf.gfile.Exists(self.checkpoint_file) - - -def run_training(config=None, tuner=None, logdir=None, trial_name=None, # pylint: disable=unused-argument - is_chief=True): - """Do all training runs. - - This is the top level training function for policy gradient based models. - Run this from the main function. - - Args: - config: config_lib.Config instance containing global config (agent and - environment hparams). If None, config will be parsed from FLAGS.config. - tuner: (unused) A tuner instance. Leave as None if not tuning. - logdir: Parent directory where all data from all runs will be written. If - None, FLAGS.logdir will be used. - trial_name: (unused) If tuning, set this to a unique string that identifies - this trial. If `tuner` is not None, this also must be set. - is_chief: True if this worker is the chief. - - Returns: - List of results dicts which were written to disk. Each training run gets a - results dict. Results dict contains metrics, i.e. (name, value) pairs which - give information about the training run. - - Raises: - ValueError: If FLAGS.num_workers does not divide FLAGS.num_repetitions. - ValueError: If results dicts read from disk contain invalid data. - """ - if not config: - # If custom config is not given, get it from flags. - config = defaults.default_config_with_updates(FLAGS.config) - if not logdir: - logdir = FLAGS.logdir - - if FLAGS.num_repetitions % FLAGS.num_workers != 0: - raise ValueError('Number of workers must divide number of repetitions') - num_local_reps = FLAGS.num_repetitions // FLAGS.num_workers - logging.info('Running %d reps globally.', FLAGS.num_repetitions) - logging.info('This worker will run %d local reps.', num_local_reps) - if FLAGS.max_npe: - max_generations = FLAGS.max_npe // config.batch_size - logging.info('Max samples per rep: %d', FLAGS.max_npe) - logging.info('Max generations per rep: %d', max_generations) - else: - max_generations = sys.maxint - logging.info('Running unlimited generations.') - - assert FLAGS.num_workers > 0 - logging.info('Starting experiment. Directory: "%s"', logdir) - results = results_lib.Results(logdir, FLAGS.task_id) - local_results_list = results.read_this_shard() - if local_results_list: - if local_results_list[0]['max_npe'] != FLAGS.max_npe: - raise ValueError( - 'Cannot resume training. Max-NPE changed. Was %s, now %s', - local_results_list[0]['max_npe'], FLAGS.max_npe) - if local_results_list[0]['max_global_repetitions'] != FLAGS.num_repetitions: - raise ValueError( - 'Cannot resume training. Number of repetitions changed. 
Was %s, ' - 'now %s', - local_results_list[0]['max_global_repetitions'], - FLAGS.num_repetitions) - start_rep = len(local_results_list) - - for rep in xrange(start_rep, num_local_reps): - global_rep = num_local_reps * FLAGS.task_id + rep - logging.info( - 'Starting repetition: Rep = %d. (global rep = %d)', - rep, global_rep) - - # Save data for each rep, like checkpoints, goes into separate folders. - run_dir = os.path.join(logdir, 'run_%d' % global_rep) - - if not tf.gfile.IsDirectory(run_dir): - tf.gfile.MakeDirs(run_dir) - checkpoint_writer = CheckpointWriter(run_dir, - population_size=config.batch_size) - - data_manager = data.DataManager(config, run_number=global_rep) - task_eval_fn = ga_lib.make_task_eval_fn(data_manager.rl_task) - - if config.agent.algorithm == 'rand': - logging.info('Running random search.') - assert FLAGS.max_npe - result = run_random_search( - FLAGS.max_npe, run_dir, task_eval_fn, config.timestep_limit) - else: - assert config.agent.algorithm == 'ga' - logging.info('Running genetic algorithm.') - pop = ga_lib.make_population( - ga_lib.random_individual(config.timestep_limit), - n=config.batch_size) - hof = utils.MaxUniquePriorityQueue(2) # Hall of fame. - result = ga_lib.ga_loop( - pop, - cxpb=config.agent.crossover_rate, mutpb=config.agent.mutation_rate, - task_eval_fn=task_eval_fn, - ngen=max_generations, halloffame=hof, - checkpoint_writer=checkpoint_writer) - - logging.info('Finished rep. Num gens: %d', result.generations) - - results_dict = { - 'max_npe': FLAGS.max_npe, - 'batch_size': config.batch_size, - 'max_batches': FLAGS.max_npe // config.batch_size, - 'npe': result.num_programs, - 'max_global_repetitions': FLAGS.num_repetitions, - 'max_local_repetitions': num_local_reps, - 'code_solution': result.best_code if result.solution_found else '', - 'best_reward': result.reward, - 'num_batches': result.generations, - 'found_solution': result.solution_found, - 'task': data_manager.task_name, - 'global_rep': global_rep} - logging.info('results_dict: %s', results_dict) - results.append(results_dict) - - if is_chief: - logging.info( - 'Worker is chief. Waiting for all workers to finish so that results ' - 'can be reported to the tuner.') - - global_results_list, shard_stats = results.read_all( - num_shards=FLAGS.num_workers) - while not all(s.finished for s in shard_stats): - logging.info( - 'Still waiting on these workers: %s', - ', '.join( - ['%d (%d reps left)' - % (i, s.max_local_reps - s.num_local_reps_completed) - for i, s in enumerate(shard_stats) - if not s.finished])) - sleep(60) - global_results_list, shard_stats = results.read_all( - num_shards=FLAGS.num_workers) - - logging.info( - '%d results obtained. Chief worker is exiting the experiment.', - len(global_results_list)) - - return global_results_list - - -def run_random_search(max_num_programs, checkpoint_dir, task_eval_fn, - timestep_limit): - """Run uniform random search routine. - - Randomly samples programs from a uniform distribution until either a valid - program is found, or the maximum NPE is reached. Results are written to disk - and returned. - - Args: - max_num_programs: Maximum NPE (number of programs executed). If no solution - is found after this many programs are tried, the run is stopped and - considered a failure. - checkpoint_dir: Where to save state during the run. - task_eval_fn: Function that maps code string to result containing total - reward and info about success. - timestep_limit: Maximum length of code strings. - - Returns: - ga_lib.GaResult namedtuple instance. 
This contains the best code and highest - reward found. - """ - checkpoint_file = os.path.join(checkpoint_dir, 'random_search.txt') - num_programs_seen = 0 - found_solution = False - best_code = '' - best_reward = 0.0 - if tf.gfile.Exists(checkpoint_file): - try: - with tf.gfile.FastGFile(checkpoint_file, 'r') as f: - lines = list(f) - num_programs_seen = int(lines[0]) - found_solution = bool(int(lines[1])) - if found_solution: - best_code = lines[2] - best_reward = float(lines[3]) - except: # pylint: disable=bare-except - pass - - while not found_solution and num_programs_seen < max_num_programs: - if num_programs_seen % 1000 == 0: - logging.info('num_programs_seen = %d', num_programs_seen) - with tf.gfile.FastGFile(checkpoint_file, 'w') as f: - f.write(str(num_programs_seen) + '\n') - f.write(str(int(found_solution)) + '\n') - - code = np.random.choice(ga_lib.GENES, timestep_limit).tolist() - res = task_eval_fn(code) - found_solution = res.correct - num_programs_seen += 1 - - if found_solution: - best_code = ''.join(code) - best_reward = res.reward - - logging.info('num_programs_seen = %d', num_programs_seen) - logging.info('found solution: %s', found_solution) - with tf.gfile.FastGFile(checkpoint_file, 'w') as f: - f.write(str(num_programs_seen) + '\n') - f.write(str(int(found_solution)) + '\n') - if found_solution: - f.write(best_code + '\n') - f.write(str(best_reward) + '\n') - - return ga_lib.GaResult( - population=[], best_code=best_code, reward=best_reward, - solution_found=found_solution, generations=num_programs_seen, - num_programs=num_programs_seen, max_generations=max_num_programs, - max_num_programs=max_num_programs) diff --git a/spaces/Nee001/bing0/src/components/chat-message.tsx b/spaces/Nee001/bing0/src/components/chat-message.tsx deleted file mode 100644 index bf272d8d7005cfd06c53bd213e09ea217e803549..0000000000000000000000000000000000000000 --- a/spaces/Nee001/bing0/src/components/chat-message.tsx +++ /dev/null @@ -1,93 +0,0 @@ -import remarkGfm from 'remark-gfm' -import remarkMath from 'remark-math' -import supersub from 'remark-supersub' -import remarkBreaks from 'remark-breaks' -import { cn } from '@/lib/utils' -import { CodeBlock } from '@/components/ui/codeblock' -import { MemoizedReactMarkdown } from '@/components/markdown' -import { LearnMore } from './learn-more' -import { ChatMessageModel } from '@/lib/bots/bing/types' -import { useEffect } from 'react' -import { TurnCounter } from './turn-counter' - -export interface ChatMessageProps { - message: ChatMessageModel -} - -export function ChatMessage({ message, ...props }: ChatMessageProps) { - useEffect(() => { - if (document.body.scrollHeight - window.innerHeight - window.scrollY - 200 < 0) { - window.scrollBy(0, 200) - } - }, [message.text]) - - return message.text ? ( -
      -
      - {obj.alt} - } - } catch (e) { - } - return {obj.alt} - }, - p({ children }) { - return

      {children}

      - }, - code({ node, inline, className, children, ...props }) { - if (children.length) { - if (children[0] == '▍') { - return ( - - ) - } - - children[0] = (children[0] as string).replace('`▍`', '▍') - } - - const match = /language-(\w+)/.exec(className || '') - - if (inline) { - return ( - - {children} - - ) - } - - return ( - - ) - } - }} - > - {message.text} -
      -
      -
      - {message.author === 'bot' && } - {message.author === 'bot' && } -
      -
      - ) : null -} diff --git a/spaces/NeuralInternet/Text-Generation_Playground/modules/ui.py b/spaces/NeuralInternet/Text-Generation_Playground/modules/ui.py deleted file mode 100644 index bb193e35c11b2a3d474ea89e7567206a3343395a..0000000000000000000000000000000000000000 --- a/spaces/NeuralInternet/Text-Generation_Playground/modules/ui.py +++ /dev/null @@ -1,92 +0,0 @@ -import gradio as gr - -refresh_symbol = '\U0001f504' # 🔄 - -css = """ -.tabs.svelte-710i53 { - margin-top: 0 -} -.py-6 { - padding-top: 2.5rem -} -.dark #refresh-button { - background-color: #ffffff1f; -} -#refresh-button { - flex: none; - margin: 0; - padding: 0; - min-width: 50px; - border: none; - box-shadow: none; - border-radius: 10px; - background-color: #0000000d; -} -#download-label, #upload-label { - min-height: 0 -} -#accordion { -} -.dark svg { - fill: white; -} -svg { - display: unset !important; - vertical-align: middle !important; - margin: 5px; -} -ol li p, ul li p { - display: inline-block; -} -""" - -chat_css = """ -.h-\[40vh\], .wrap.svelte-byatnx.svelte-byatnx.svelte-byatnx { - height: 66.67vh -} -.gradio-container { - max-width: 800px !important; - margin-left: auto !important; - margin-right: auto !important; -} -.w-screen { - width: unset -} -div.svelte-362y77>*, div.svelte-362y77>.form>* { - flex-wrap: nowrap -} -/* fixes the API documentation in chat mode */ -.api-docs.svelte-1iguv9h.svelte-1iguv9h.svelte-1iguv9h { - display: grid; -} -.pending.svelte-1ed2p3z { - opacity: 1; -} -""" - -class ToolButton(gr.Button, gr.components.FormComponent): - """Small button with single emoji as text, fits inside gradio forms""" - - def __init__(self, **kwargs): - super().__init__(variant="tool", **kwargs) - - def get_block_name(self): - return "button" - -def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id): - def refresh(): - refresh_method() - args = refreshed_args() if callable(refreshed_args) else refreshed_args - - for k, v in args.items(): - setattr(refresh_component, k, v) - - return gr.update(**(args or {})) - - refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id) - refresh_button.click( - fn=refresh, - inputs=[], - outputs=[refresh_component] - ) - return refresh_button diff --git a/spaces/NeuroSenko/audio-processing-utils/README.md b/spaces/NeuroSenko/audio-processing-utils/README.md deleted file mode 100644 index fd87949483922dbe320990a02f82a42b3177cbf5..0000000000000000000000000000000000000000 --- a/spaces/NeuroSenko/audio-processing-utils/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: Audio Processing Utils -emoji: 📊 -colorFrom: blue -colorTo: green -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -How to run locally using Windows: -1. Mare sure you have installed ffmpeg in your system -2. Clone the repo: `git clone https://huggingface.co/spaces/NeuroSenko/audio-processing-utils` -3. Run `install.bat` -4. 
Run `start.bat` diff --git a/spaces/NickOrion21/stabilityai-stable-diffusion-2-1/app.py b/spaces/NickOrion21/stabilityai-stable-diffusion-2-1/app.py deleted file mode 100644 index 0160420876923d89f2ab5fccb9f4d13725e29972..0000000000000000000000000000000000000000 --- a/spaces/NickOrion21/stabilityai-stable-diffusion-2-1/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stabilityai/stable-diffusion-2-1").launch() \ No newline at end of file diff --git a/spaces/NoCrypt/mikuTTS/app.py b/spaces/NoCrypt/mikuTTS/app.py deleted file mode 100644 index a4b8011e6198a0466cadf939119c6a60289f82e5..0000000000000000000000000000000000000000 --- a/spaces/NoCrypt/mikuTTS/app.py +++ /dev/null @@ -1,311 +0,0 @@ -import asyncio -import datetime -import logging -import os -import time -import traceback - -import edge_tts -import gradio as gr -import librosa -import torch -from fairseq import checkpoint_utils -from huggingface_hub import snapshot_download - - -from config import Config -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -from rmvpe import RMVPE -from vc_infer_pipeline import VC - -logging.getLogger("fairseq").setLevel(logging.WARNING) -logging.getLogger("numba").setLevel(logging.WARNING) -logging.getLogger("markdown_it").setLevel(logging.WARNING) -logging.getLogger("urllib3").setLevel(logging.WARNING) -logging.getLogger("matplotlib").setLevel(logging.WARNING) - -limitation = os.getenv("SYSTEM") == "spaces" - -config = Config() - -# Edge TTS -edge_output_filename = "edge_output.mp3" -tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices()) -tts_voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list] - -# RVC models -model_root = snapshot_download(repo_id="NoCrypt/miku_RVC", token=os.environ["TOKEN"]) -models = [d for d in os.listdir(model_root) if os.path.isdir(f"{model_root}/{d}")] -models.sort() - - -def model_data(model_name): - # global n_spk, tgt_sr, net_g, vc, cpt, version, index_file - pth_path = [ - f"{model_root}/{model_name}/{f}" - for f in os.listdir(f"{model_root}/{model_name}") - if f.endswith(".pth") - ][0] - print(f"Loading {pth_path}") - cpt = torch.load(pth_path, map_location="cpu") - tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - if_f0 = cpt.get("f0", 1) - version = cpt.get("version", "v1") - if version == "v1": - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif version == "v2": - if if_f0 == 1: - net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - else: - raise ValueError("Unknown version") - del net_g.enc_q - net_g.load_state_dict(cpt["weight"], strict=False) - print("Model loaded") - net_g.eval().to(config.device) - if config.is_half: - net_g = net_g.half() - else: - net_g = net_g.float() - vc = VC(tgt_sr, config) - # n_spk = cpt["config"][-3] - - index_files = [ - f"{model_root}/{model_name}/{f}" - for f in os.listdir(f"{model_root}/{model_name}") - if f.endswith(".index") - ] - if len(index_files) == 0: - print("No index file found") - index_file = "" - else: - index_file = index_files[0] - print(f"Index file found: {index_file}") - - return tgt_sr, net_g, vc, version, index_file, if_f0 - - -def load_hubert(): - # global hubert_model - 
models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - ["hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(config.device) - if config.is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - return hubert_model.eval() - - -def tts( - model_name, - speed, - tts_text, - tts_voice, - f0_up_key, - f0_method, - index_rate, - protect, - filter_radius=3, - resample_sr=0, - rms_mix_rate=0.25, -): - print("------------------") - print(datetime.datetime.now()) - print("tts_text:") - print(tts_text) - print(f"tts_voice: {tts_voice}, speed: {speed}") - print(f"Model name: {model_name}") - print(f"F0: {f0_method}, Key: {f0_up_key}, Index: {index_rate}, Protect: {protect}") - try: - if limitation and len(tts_text) > 1000: - print("Error: Text too long") - return ( - f"Text characters should be at most 1000 in this huggingface space, but got {len(tts_text)} characters.", - None, - None, - ) - t0 = time.time() - if speed >= 0: - speed_str = f"+{speed}%" - else: - speed_str = f"{speed}%" - asyncio.run( - edge_tts.Communicate( - tts_text, "-".join(tts_voice.split("-")[:-1]), rate=speed_str - ).save(edge_output_filename) - ) - t1 = time.time() - edge_time = t1 - t0 - audio, sr = librosa.load(edge_output_filename, sr=16000, mono=True) - duration = len(audio) / sr - print(f"Audio duration: {duration}s") - if limitation and duration >= 200: - print("Error: Audio too long") - return ( - f"Audio should be less than 200 seconds in this huggingface space, but got {duration}s.", - edge_output_filename, - None, - ) - f0_up_key = int(f0_up_key) - - tgt_sr, net_g, vc, version, index_file, if_f0 = model_data(model_name) - if f0_method == "rmvpe": - vc.model_rmvpe = rmvpe_model - times = [0, 0, 0] - audio_opt = vc.pipeline( - hubert_model, - net_g, - 0, - audio, - edge_output_filename, - times, - f0_up_key, - f0_method, - index_file, - # file_big_npy, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - None, - ) - if tgt_sr != resample_sr >= 16000: - tgt_sr = resample_sr - info = f"Success. Time: edge-tts: {edge_time}s, npy: {times[0]}s, f0: {times[1]}s, infer: {times[2]}s" - print(info) - return ( - info, - edge_output_filename, - (tgt_sr, audio_opt), - ) - except EOFError: - info = ( - "It seems that the edge-tts output is not valid. " - "This may occur when the input text and the speaker do not match. " - "For example, maybe you entered Japanese (without alphabets) text but chose non-Japanese speaker?" 
- ) - print(info) - return info, None, None - except: - info = traceback.format_exc() - print(info) - return info, None, None - - -print("Loading hubert model...") -hubert_model = load_hubert() -print("Hubert model loaded.") - -print("Loading rmvpe model...") -rmvpe_model = RMVPE("rmvpe.pt", config.is_half, config.device) -print("rmvpe model loaded.") - -initial_md = """ -![banner that says mikutts](https://huggingface.co/spaces/NoCrypt/mikuTTS/resolve/main/imgs/banner_mikutts.webp) -""" - -app = gr.Blocks(theme='NoCrypt/miku') -with app: - gr.Markdown(initial_md) - with gr.Row(): - with gr.Column(): - model_name = gr.Dropdown( - label="Model", - choices=models, - value=models[0], - ) - f0_key_up = gr.Number( - label="Tune", - value=6, - ) - with gr.Column(): - f0_method = gr.Radio( - label="Pitch extraction method (pm: very fast, low quality, rmvpe: a little slow, high quality)", - choices=["pm", "rmvpe"], # harvest and crepe is too slow - value="rmvpe", - interactive=True, - ) - index_rate = gr.Slider( - minimum=0, - maximum=1, - label="Index rate", - value=1, - interactive=True, - ) - protect0 = gr.Slider( - minimum=0, - maximum=0.5, - label="Protect", - value=0.33, - step=0.01, - interactive=True, - ) - with gr.Row(): - with gr.Column(): - tts_voice = gr.Dropdown( - label="Edge-tts speaker (format: language-Country-Name-Gender), make sure the gender matches the model", - choices=tts_voices, - allow_custom_value=False, - value="ja-JP-NanamiNeural-Female", - ) - speed = gr.Slider( - minimum=-100, - maximum=100, - label="Speech speed (%)", - value=0, - step=10, - interactive=True, - ) - tts_text = gr.Textbox(label="Input Text", value="こんにちは、私の名前は初音ミクです!") - with gr.Column(): - but0 = gr.Button("Convert", variant="primary") - info_text = gr.Textbox(label="Output info") - with gr.Column(): - with gr.Accordion("Edge Voice", open=False): - edge_tts_output = gr.Audio(label="Edge Voice", type="filepath") - tts_output = gr.Audio(label="Result") - but0.click( - tts, - [ - model_name, - speed, - tts_text, - tts_voice, - f0_key_up, - f0_method, - index_rate, - protect0, - ], - [info_text, edge_tts_output, tts_output], - ) - with gr.Row(): - examples = gr.Examples( - examples_per_page=100, - examples=[ - ["こんにちは、私の名前は初音ミクです!", "ja-JP-NanamiNeural-Female", 6], - ["Hello there. My name is Hatsune Miku!","en-CA-ClaraNeural-Female", 6], - ["Halo. Nama saya Hatsune Miku!","id-ID-GadisNeural-Female", 4], - ["Halo. Jenengku Hatsune Miku!","jv-ID-SitiNeural-Female", 10], - ], - inputs=[tts_text, tts_voice, f0_key_up], - ) - -app.launch() diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/wav2vec/wav2vec2_asr.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/wav2vec/wav2vec2_asr.py deleted file mode 100644 index eb5d819da5121a243e345b3812292ef0b13ccf98..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/models/wav2vec/wav2vec2_asr.py +++ /dev/null @@ -1,664 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
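The tts() helper above first synthesizes speech with edge-tts and then reloads it as 16 kHz mono audio for the voice-conversion pipeline. A sketch of just that front half, reusing the same edge_tts and librosa calls as the deleted app; the example text, voice and output path are assumptions:

# Sketch only: edge-tts synthesis followed by a 16 kHz mono load, mirroring tts() above.
import asyncio
import edge_tts
import librosa

async def synthesize(text, voice, speed_pct=0, out_path="edge_output.mp3"):
    rate = f"+{speed_pct}%" if speed_pct >= 0 else f"{speed_pct}%"
    await edge_tts.Communicate(text, voice, rate=rate).save(out_path)
    return out_path

path = asyncio.run(synthesize("こんにちは、私の名前は初音ミクです!", "ja-JP-NanamiNeural"))
audio, sr = librosa.load(path, sr=16000, mono=True)   # the RVC pipeline expects 16 kHz mono
print(f"{len(audio) / sr:.2f}s of audio at {sr} Hz")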
- -from argparse import Namespace -import contextlib -import copy -import math -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from dataclasses import dataclass, field -from omegaconf import MISSING, II, open_dict -from typing import Any, Optional - -from fairseq import checkpoint_utils, tasks, utils -from fairseq.dataclass import FairseqDataclass -from fairseq.dataclass.utils import convert_namespace_to_omegaconf -from fairseq.tasks import FairseqTask -from fairseq.models import ( - BaseFairseqModel, - FairseqEncoder, - FairseqEncoderDecoderModel, - FairseqIncrementalDecoder, - register_model, -) -from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES -from fairseq.modules import ( - LayerNorm, - PositionalEmbedding, - TransformerDecoderLayer, -) - - -@dataclass -class Wav2Vec2AsrConfig(FairseqDataclass): - w2v_path: str = field( - default=MISSING, metadata={"help": "path to wav2vec 2.0 model"} - ) - no_pretrained_weights: bool = field( - default=False, metadata={"help": "if true, does not load pretrained weights"} - ) - dropout_input: float = field( - default=0.0, - metadata={"help": "dropout to apply to the input (after feat extr)"}, - ) - final_dropout: float = field( - default=0.0, - metadata={"help": "dropout after transformer and before final projection"}, - ) - dropout: float = field( - default=0.0, metadata={"help": "dropout probability inside wav2vec 2.0 model"} - ) - attention_dropout: float = field( - default=0.0, - metadata={ - "help": "dropout probability for attention weights inside wav2vec 2.0 model" - }, - ) - activation_dropout: float = field( - default=0.0, - metadata={ - "help": "dropout probability after activation in FFN inside wav2vec 2.0 model" - }, - ) - conv_feature_layers: Optional[str] = field( - default="[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512,2,2)] + [(512,2,2)]", - metadata={ - "help": ( - "string describing convolutional feature extraction " - "layers in form of a python list that contains " - "[(dim, kernel_size, stride), ...]" - ), - }, - ) - encoder_embed_dim: Optional[int] = field( - default=768, metadata={"help": "encoder embedding dimension"} - ) - - # masking - apply_mask: bool = field( - default=False, metadata={"help": "apply masking during fine-tuning"} - ) - mask_length: int = field( - default=10, metadata={"help": "repeat the mask indices multiple times"} - ) - mask_prob: float = field( - default=0.5, - metadata={ - "help": "probability of replacing a token with mask (normalized by length)" - }, - ) - mask_selection: MASKING_DISTRIBUTION_CHOICES = field( - default="static", metadata={"help": "how to choose masks"} - ) - mask_other: float = field( - default=0, - metadata={ - "help": "secondary mask argument (used for more complex distributions), " - "see help in compute_mask_indices" - }, - ) - no_mask_overlap: bool = field( - default=False, metadata={"help": "whether to allow masks to overlap"} - ) - mask_min_space: Optional[int] = field( - default=1, - metadata={"help": "min space between spans (if no overlap is enabled)"}, - ) - - # channel masking - mask_channel_length: int = field( - default=10, metadata={"help": "length of the mask for features (channels)"} - ) - mask_channel_prob: float = field( - default=0.0, metadata={"help": "probability of replacing a feature with 0"} - ) - mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field( - default="static", - metadata={"help": "how to choose mask length for channel masking"}, - ) - mask_channel_other: float = field( - 
default=0, - metadata={ - "help": "secondary mask argument (used for more complex distributions), " - "see help in compute_mask_indicesh" - }, - ) - no_mask_channel_overlap: bool = field( - default=False, metadata={"help": "whether to allow channel masks to overlap"} - ) - freeze_finetune_updates: int = field( - default=0, metadata={"help": "dont finetune wav2vec for this many updates"} - ) - feature_grad_mult: float = field( - default=0.0, metadata={"help": "reset feature grad mult in wav2vec 2.0 to this"} - ) - layerdrop: float = field( - default=0.0, metadata={"help": "probability of dropping a layer in wav2vec 2.0"} - ) - mask_channel_min_space: Optional[int] = field( - default=1, - metadata={"help": "min space between spans (if no overlap is enabled)"}, - ) - mask_channel_before: bool = False - normalize: bool = II("task.normalize") - data: str = II("task.data") - # this holds the loaded wav2vec args - w2v_args: Any = None - - -@dataclass -class Wav2Vec2CtcConfig(Wav2Vec2AsrConfig): - blank_weight: float = 0 - blank_mode: str = "add" - - -@register_model("wav2vec_ctc", dataclass=Wav2Vec2CtcConfig) -class Wav2VecCtc(BaseFairseqModel): - def __init__(self, cfg: Wav2Vec2CtcConfig, w2v_encoder: BaseFairseqModel): - super().__init__() - self.cfg = cfg - self.w2v_encoder = w2v_encoder - self.blank_weight = cfg.blank_weight - self.blank_mode = cfg.blank_mode - - def upgrade_state_dict_named(self, state_dict, name): - super().upgrade_state_dict_named(state_dict, name) - return state_dict - - @classmethod - def build_model(cls, cfg: Wav2Vec2CtcConfig, task: FairseqTask): - """Build a new model instance.""" - w2v_encoder = Wav2VecEncoder(cfg, len(task.target_dictionary)) - return cls(cfg, w2v_encoder) - - def get_logits(self, net_output, normalize=False): - logits = net_output["encoder_out"] - if self.blank_weight != 0: - if self.blank_mode == "add": - logits[..., 0] += self.blank_weight - elif self.blank_mode == "set": - logits[..., 0] = self.blank_weight - else: - raise Exception(f"invalid blank mode {self.blank_mode}") - - if net_output["padding_mask"] is not None and net_output["padding_mask"].any(): - logits[net_output["padding_mask"].T][..., 0] = float("inf") - logits[net_output["padding_mask"].T][..., 1:] = float("-inf") - - if normalize: - logits = utils.log_softmax(logits.float(), dim=-1) - - return logits - - def get_normalized_probs(self, net_output, log_probs): - """Get normalized probabilities (or log probs) from a net's output.""" - - logits = self.get_logits(net_output) - - if log_probs: - return utils.log_softmax(logits.float(), dim=-1) - else: - return utils.softmax(logits.float(), dim=-1) - - def forward(self, **kwargs): - x = self.w2v_encoder(**kwargs) - return x - - -@dataclass -class Wav2Vec2Seq2SeqConfig(Wav2Vec2AsrConfig): - decoder_embed_dim: int = field( - default=768, metadata={"help": "decoder embedding dimension"} - ) - decoder_ffn_embed_dim: int = field( - default=3072, metadata={"help": "decoder embedding dimension for FFN"} - ) - decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"}) - decoder_layerdrop: float = field( - default=0.0, metadata={"help": "decoder layerdrop chance"} - ) - decoder_attention_heads: int = field( - default=4, metadata={"help": "num decoder attention heads"} - ) - decoder_learned_pos: bool = field( - default=False, - metadata={"help": "use learned positional embeddings in the decoder"}, - ) - decoder_normalize_before: bool = field( - default=False, metadata={"help": "apply layernorm before each decoder block"} - 
) - no_token_positional_embeddings: bool = field( - default=False, - metadata={ - "help": "if set, disables positional embeddings (outside self attention)" - }, - ) - decoder_dropout: float = field( - default=0.0, metadata={"help": "dropout probability in the decoder"} - ) - decoder_attention_dropout: float = field( - default=0.0, - metadata={ - "help": "dropout probability for attention weights inside the decoder" - }, - ) - decoder_activation_dropout: float = field( - default=0.0, - metadata={ - "help": "dropout probability after activation in FFN inside the decoder" - }, - ) - max_target_positions: int = field( - default=2048, metadata={"help": "max target positions"} - ) - share_decoder_input_output_embed: bool = field( - default=False, metadata={"help": "share decoder input and output embeddings"} - ) - autoregressive: bool = II("task.autoregressive") - - -@register_model("wav2vec_seq2seq", dataclass=Wav2Vec2Seq2SeqConfig) -class Wav2Vec2Seq2SeqModel(FairseqEncoderDecoderModel): - def __init__(self, encoder, decoder): - super().__init__(encoder, decoder) - - @classmethod - def build_model(cls, cfg: Wav2Vec2Seq2SeqConfig, task: FairseqTask): - """Build a new model instance.""" - - assert ( - cfg.autoregressive - ), "Please set task.autoregressive=true for seq2seq asr models" - - src_dict, tgt_dict = task.source_dictionary, task.target_dictionary - - def build_embedding(dictionary, embed_dim): - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - emb = Embedding(num_embeddings, embed_dim, padding_idx) - return emb - - decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim) - - encoder = cls.build_encoder(cfg) - decoder = cls.build_decoder(cfg, tgt_dict, decoder_embed_tokens) - - return Wav2Vec2Seq2SeqModel(encoder, decoder) - - @classmethod - def build_encoder(cls, cfg: Wav2Vec2AsrConfig): - return Wav2VecEncoder(cfg) - - @classmethod - def build_decoder(cls, cfg: Wav2Vec2Seq2SeqConfig, tgt_dict, embed_tokens): - return TransformerDecoder(cfg, tgt_dict, embed_tokens) - - def forward(self, **kwargs): - encoder_out = self.encoder(**kwargs) - decoder_out = self.decoder(encoder_out=encoder_out, **kwargs) - return decoder_out - - def upgrade_state_dict_named(self, state_dict, name): - super().upgrade_state_dict_named(state_dict, name) - return state_dict - - -class Wav2VecEncoder(FairseqEncoder): - def __init__(self, cfg: Wav2Vec2AsrConfig, output_size=None): - self.apply_mask = cfg.apply_mask - - arg_overrides = { - "dropout": cfg.dropout, - "activation_dropout": cfg.activation_dropout, - "dropout_input": cfg.dropout_input, - "attention_dropout": cfg.attention_dropout, - "mask_length": cfg.mask_length, - "mask_prob": cfg.mask_prob, - "mask_selection": cfg.mask_selection, - "mask_other": cfg.mask_other, - "no_mask_overlap": cfg.no_mask_overlap, - "mask_channel_length": cfg.mask_channel_length, - "mask_channel_prob": cfg.mask_channel_prob, - "mask_channel_before": cfg.mask_channel_before, - "mask_channel_selection": cfg.mask_channel_selection, - "mask_channel_other": cfg.mask_channel_other, - "no_mask_channel_overlap": cfg.no_mask_channel_overlap, - "encoder_layerdrop": cfg.layerdrop, - "feature_grad_mult": cfg.feature_grad_mult, - } - - if cfg.w2v_args is None: - state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides) - w2v_args = state.get("cfg", None) - if w2v_args is None: - w2v_args = convert_namespace_to_omegaconf(state["args"]) - w2v_args.criterion = None - w2v_args.lr_scheduler = None - cfg.w2v_args = w2v_args - else: - state = None 
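Wav2VecEncoder only starts fine-tuning the pretrained wav2vec 2.0 weights after freeze_finetune_updates steps; until then its forward pass (shown further below) runs under torch.no_grad(). A minimal sketch of that conditional-freeze pattern, with a placeholder module and made-up update counts:

# Sketch only: run a module under no_grad() until fine-tuning is allowed to start.
import contextlib
import torch

def maybe_frozen_forward(module, x, num_updates, freeze_updates):
    ft = freeze_updates <= num_updates                      # fine-tuning has started
    with torch.no_grad() if not ft else contextlib.ExitStack():
        return module(x)

lin = torch.nn.Linear(4, 2)                                 # placeholder module
out = maybe_frozen_forward(lin, torch.randn(3, 4), num_updates=0, freeze_updates=100)
print(out.requires_grad)                                    # False while still frozen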
- w2v_args = cfg.w2v_args - if isinstance(w2v_args, Namespace): - cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args) - - assert cfg.normalize == w2v_args.task.normalize, ( - "Fine-tuning works best when data normalization is the same. " - "Please check that --normalize is set or unset for both pre-training and here" - ) - - w2v_args.task.data = cfg.data - task = tasks.setup_task(w2v_args.task) - model = task.build_model(w2v_args.model) - - if state is not None and not cfg.no_pretrained_weights: - model.load_state_dict(state["model"], strict=True) - - model.remove_pretraining_modules() - - super().__init__(task.source_dictionary) - - d = w2v_args.model.encoder_embed_dim - - self.w2v_model = model - - self.final_dropout = nn.Dropout(cfg.final_dropout) - self.freeze_finetune_updates = cfg.freeze_finetune_updates - self.num_updates = 0 - - targ_d = None - self.proj = None - - if output_size is not None: - targ_d = output_size - elif getattr(cfg, "decoder_embed_dim", d) != d: - targ_d = cfg.decoder_embed_dim - - if targ_d is not None: - self.proj = Linear(d, targ_d) - - def set_num_updates(self, num_updates): - """Set the number of parameters updates.""" - super().set_num_updates(num_updates) - self.num_updates = num_updates - - def forward(self, source, padding_mask, **kwargs): - - w2v_args = { - "source": source, - "padding_mask": padding_mask, - "mask": self.apply_mask and self.training, - } - - ft = self.freeze_finetune_updates <= self.num_updates - - with torch.no_grad() if not ft else contextlib.ExitStack(): - res = self.w2v_model.extract_features(**w2v_args) - - x = res["x"] - padding_mask = res["padding_mask"] - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - x = self.final_dropout(x) - - if self.proj: - x = self.proj(x) - - return { - "encoder_out": x, # T x B x C - "padding_mask": padding_mask, # B x T, - "layer_results": res["layer_results"], - } - - def forward_torchscript(self, net_input): - if torch.jit.is_scripting(): - return self.forward(net_input["source"], net_input["padding_mask"]) - else: - return self.forward_non_torchscript(net_input) - - def reorder_encoder_out(self, encoder_out, new_order): - if encoder_out["encoder_out"] is not None: - encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( - 1, new_order - ) - if encoder_out["padding_mask"] is not None: - encoder_out["padding_mask"] = encoder_out[ - "padding_mask" - ].index_select(0, new_order) - return encoder_out - - def max_positions(self): - """Maximum input length supported by the encoder.""" - return None - - def upgrade_state_dict_named(self, state_dict, name): - return state_dict - - -class TransformerDecoder(FairseqIncrementalDecoder): - """ - Transformer decoder consisting of *args.decoder_layers* layers. Each layer - is a :class:`TransformerDecoderLayer`. - - Args: - args (argparse.Namespace): parsed command-line arguments - dictionary (~fairseq.data.Dictionary): decoding dictionary - embed_tokens (torch.nn.Embedding): output embedding - no_encoder_attn (bool, optional): whether to attend to encoder outputs - (default: False). 
- """ - - def __init__( - self, - cfg: Wav2Vec2Seq2SeqConfig, - dictionary, - embed_tokens, - no_encoder_attn=False, - ): - super().__init__(dictionary) - - self.dropout = cfg.decoder_dropout - self.share_input_output_embed = cfg.share_decoder_input_output_embed - - input_embed_dim = embed_tokens.embedding_dim - embed_dim = cfg.decoder_embed_dim - self.output_embed_dim = cfg.decoder_embed_dim - - self.layerdrop = cfg.decoder_layerdrop - - self.padding_idx = embed_tokens.padding_idx - self.max_target_positions = cfg.max_target_positions - - self.embed_tokens = embed_tokens - self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim - - self.project_in_dim = ( - Linear(input_embed_dim, embed_dim, bias=False) - if embed_dim != input_embed_dim - else None - ) - - self.embed_positions = ( - PositionalEmbedding( - cfg.max_target_positions, - embed_dim, - self.padding_idx, - learned=cfg.decoder_learned_pos, - ) - if not cfg.no_token_positional_embeddings - else None - ) - - # TODO: update this when transformer gets converted to dataclass configs - transformer_cfg = copy.deepcopy(cfg) - with open_dict(transformer_cfg): - transformer_cfg.dropout = transformer_cfg.decoder_dropout - transformer_cfg.attention_dropout = ( - transformer_cfg.decoder_attention_dropout - ) - transformer_cfg.activation_dropout = ( - transformer_cfg.decoder_activation_dropout - ) - - self.layers = nn.ModuleList([]) - self.layers.extend( - [ - TransformerDecoderLayer(transformer_cfg, no_encoder_attn) - for _ in range(transformer_cfg.decoder_layers) - ] - ) - - if not self.share_input_output_embed: - self.embed_out = nn.Parameter( - torch.Tensor(len(dictionary), self.output_embed_dim) - ) - nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5) - - if transformer_cfg.decoder_normalize_before: - self.layer_norm = LayerNorm(embed_dim) - else: - self.layer_norm = None - - def forward( - self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused - ): - """ - Args: - prev_output_tokens (LongTensor): previous decoder outputs of shape - `(batch, tgt_len)`, for teacher forcing - encoder_out (Tensor, optional): output from the encoder, used for - encoder-side attention - incremental_state (dict): dictionary used for storing state during - :ref:`Incremental decoding` - - Returns: - tuple: - - the decoder's output of shape `(batch, tgt_len, vocab)` - - a dictionary with any model-specific outputs - """ - prev_output_tokens = prev_output_tokens.long() - x, extra = self.extract_features( - prev_output_tokens, encoder_out, incremental_state - ) - x = self.output_layer(x) - return x, extra - - def extract_features( - self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused - ): - """ - Similar to *forward* but only return features. 
- - Returns: - tuple: - - the decoder's features of shape `(batch, tgt_len, embed_dim)` - - a dictionary with any model-specific outputs - """ - - # embed positions - positions = ( - self.embed_positions( - prev_output_tokens, incremental_state=incremental_state - ) - if self.embed_positions is not None - else None - ) - - if incremental_state is not None: - prev_output_tokens = prev_output_tokens[:, -1:] - if positions is not None: - positions = positions[:, -1:] - - # embed tokens and positions - x = self.embed_scale * self.embed_tokens(prev_output_tokens) - - if self.project_in_dim is not None: - x = self.project_in_dim(x) - - if positions is not None: - x += positions - x = F.dropout(x, p=self.dropout, training=self.training) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - attn = None - - inner_states = [x] - - # decoder layers - self_attn_padding_mask = None - if prev_output_tokens.eq(self.padding_idx).any(): - self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx) - for layer in self.layers: - dropout_probability = np.random.random() - if not self.training or (dropout_probability > self.layerdrop): - x, attn, _ = layer( - x, - encoder_out["encoder_out"] if encoder_out is not None else None, - encoder_out["padding_mask"] if encoder_out is not None else None, - incremental_state, - self_attn_mask=self.buffered_future_mask(x) - if incremental_state is None - else None, - self_attn_padding_mask=self_attn_padding_mask - ) - inner_states.append(x) - - if self.layer_norm: - x = self.layer_norm(x) - - # T x B x C -> B x T x C - x = x.transpose(0, 1) - - return x, {"attn": attn, "inner_states": inner_states} - - def output_layer(self, features, **kwargs): - """Project features to the vocabulary size.""" - # project back to size of vocabulary - if self.share_input_output_embed: - return F.linear(features, self.embed_tokens.weight) - else: - return F.linear(features, self.embed_out) - - def max_positions(self): - """Maximum output length supported by the decoder.""" - if self.embed_positions is None: - return self.max_target_positions - return min(self.max_target_positions, self.embed_positions.max_positions) - - def buffered_future_mask(self, tensor): - dim = tensor.size(0) - if ( - not hasattr(self, "_future_mask") - or self._future_mask is None - or self._future_mask.device != tensor.device - or self._future_mask.size(0) < dim - ): - self._future_mask = torch.triu( - utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 - ) - return self._future_mask[:dim, :dim] - - def upgrade_state_dict_named(self, state_dict, name): - return state_dict - - -def Embedding(num_embeddings, embedding_dim, padding_idx): - m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) - nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) - nn.init.constant_(m.weight[padding_idx], 0) - return m - - -def Linear(in_features, out_features, bias=True): - m = nn.Linear(in_features, out_features, bias) - nn.init.xavier_uniform_(m.weight) - if bias: - nn.init.constant_(m.bias, 0.0) - return m diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_metrics.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_metrics.py deleted file mode 100644 index 2de6969cf4445bc6cda44dacf6de765ea30d5f5b..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/tests/test_metrics.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
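buffered_future_mask above builds a cached causal mask so each decoder position can attend only to itself and earlier positions. A standalone sketch of the same triangular -inf mask; the size 4 is just for illustration:

# Sketch only: causal attention mask — entries for future positions are -inf, the rest 0.
import torch

def future_mask(dim):
    return torch.triu(torch.full((dim, dim), float("-inf")), diagonal=1)

print(future_mask(4))
# tensor([[0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])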
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import unittest -import uuid - -from fairseq import metrics - - -class TestMetrics(unittest.TestCase): - def test_nesting(self): - with metrics.aggregate() as a: - metrics.log_scalar("loss", 1) - with metrics.aggregate() as b: - metrics.log_scalar("loss", 2) - - self.assertEqual(a.get_smoothed_values()["loss"], 1.5) - self.assertEqual(b.get_smoothed_values()["loss"], 2) - - def test_new_root(self): - with metrics.aggregate() as a: - metrics.log_scalar("loss", 1) - with metrics.aggregate(new_root=True) as b: - metrics.log_scalar("loss", 2) - - self.assertEqual(a.get_smoothed_values()["loss"], 1) - self.assertEqual(b.get_smoothed_values()["loss"], 2) - - def test_nested_new_root(self): - with metrics.aggregate() as layer1: - metrics.log_scalar("loss", 1) - with metrics.aggregate(new_root=True) as layer2: - metrics.log_scalar("loss", 2) - with metrics.aggregate() as layer3: - metrics.log_scalar("loss", 3) - with metrics.aggregate(new_root=True) as layer4: - metrics.log_scalar("loss", 4) - metrics.log_scalar("loss", 1.5) - - self.assertEqual(layer4.get_smoothed_values()["loss"], 4) - self.assertEqual(layer3.get_smoothed_values()["loss"], 3) - self.assertEqual(layer2.get_smoothed_values()["loss"], 2.5) - self.assertEqual(layer1.get_smoothed_values()["loss"], 1.25) - - def test_named(self): - name = str(uuid.uuid4()) - metrics.reset_meters(name) - - with metrics.aggregate(name): - metrics.log_scalar("loss", 1) - - metrics.log_scalar("loss", 3) - - with metrics.aggregate(name): - metrics.log_scalar("loss", 2) - - self.assertEqual(metrics.get_smoothed_values(name)["loss"], 1.5) - - def test_nested_duplicate_names(self): - name = str(uuid.uuid4()) - metrics.reset_meters(name) - - with metrics.aggregate(name): - metrics.log_scalar("loss", 1) - with metrics.aggregate() as other: - with metrics.aggregate(name): - metrics.log_scalar("loss", 2) - metrics.log_scalar("loss", 6) - - self.assertEqual(metrics.get_smoothed_values(name)["loss"], 3) - self.assertEqual(other.get_smoothed_values()["loss"], 2) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/criterions/ctc.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/criterions/ctc.py deleted file mode 100644 index 10e3618382c86a84466cb4264d62f31537980251..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/criterions/ctc.py +++ /dev/null @@ -1,295 +0,0 @@ -# All rights reserved. -# -# This source code is licensed under the license found in the LICENSE file in -# the root directory of this source tree. An additional grant of patent rights -# can be found in the PATENTS file in the same directory. 
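The TestMetrics cases above pin down how fairseq's metric aggregation nests: a value logged inside an inner aggregate() context also flows into every enclosing context unless new_root=True cuts the chain. A small usage sketch of that behaviour; the numbers mirror test_nesting:

# Sketch only: nested metric aggregation as asserted by test_nesting above.
from fairseq import metrics

with metrics.aggregate() as outer:
    metrics.log_scalar("loss", 1)
    with metrics.aggregate() as inner:
        metrics.log_scalar("loss", 2)

print(outer.get_smoothed_values()["loss"])   # 1.5 — averages both logged values
print(inner.get_smoothed_values()["loss"])   # 2   — only sees the inner log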
- -import math -from argparse import Namespace -from dataclasses import dataclass, field -from omegaconf import II -from typing import Optional - -import torch -import torch.nn.functional as F -from fairseq import metrics, utils -from fairseq.criterions import FairseqCriterion, register_criterion -from fairseq.dataclass import FairseqDataclass -from fairseq.data.data_utils import post_process -from fairseq.tasks import FairseqTask -from fairseq.logging.meters import safe_round - - -@dataclass -class CtcCriterionConfig(FairseqDataclass): - zero_infinity: bool = field( - default=False, - metadata={"help": "zero inf loss when source length <= target length"}, - ) - sentence_avg: bool = II("optimization.sentence_avg") - post_process: str = field( - default="letter", - metadata={ - "help": "how to post process predictions into words. can be letter, " - "wordpiece, BPE symbols, etc. " - "See fairseq.data.data_utils.post_process() for full list of options" - }, - ) - wer_kenlm_model: Optional[str] = field( - default=None, - metadata={ - "help": "if this is provided, use kenlm to compute wer (along with other wer_* args)" - }, - ) - wer_lexicon: Optional[str] = field( - default=None, - metadata={"help": "lexicon to use with wer_kenlm_model"}, - ) - wer_lm_weight: float = field( - default=2.0, - metadata={"help": "lm weight to use with wer_kenlm_model"}, - ) - wer_word_score: float = field( - default=-1.0, - metadata={"help": "lm word score to use with wer_kenlm_model"}, - ) - - wer_args: Optional[str] = field( - default=None, - metadata={ - "help": "DEPRECATED: tuple of (wer_kenlm_model, wer_lexicon, wer_lm_weight, wer_word_score)" - }, - ) - - -@register_criterion("ctc", dataclass=CtcCriterionConfig) -class CtcCriterion(FairseqCriterion): - def __init__(self, cfg: CtcCriterionConfig, task: FairseqTask): - super().__init__(task) - self.blank_idx = ( - task.target_dictionary.index(task.blank_symbol) - if hasattr(task, "blank_symbol") - else 0 - ) - self.pad_idx = task.target_dictionary.pad() - self.eos_idx = task.target_dictionary.eos() - self.post_process = cfg.post_process - - if cfg.wer_args is not None: - ( - cfg.wer_kenlm_model, - cfg.wer_lexicon, - cfg.wer_lm_weight, - cfg.wer_word_score, - ) = eval(cfg.wer_args) - - if cfg.wer_kenlm_model is not None: - from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder - - dec_args = Namespace() - dec_args.nbest = 1 - dec_args.criterion = "ctc" - dec_args.kenlm_model = cfg.wer_kenlm_model - dec_args.lexicon = cfg.wer_lexicon - dec_args.beam = 50 - dec_args.beam_size_token = min(50, len(task.target_dictionary)) - dec_args.beam_threshold = min(50, len(task.target_dictionary)) - dec_args.lm_weight = cfg.wer_lm_weight - dec_args.word_score = cfg.wer_word_score - dec_args.unk_weight = -math.inf - dec_args.sil_weight = 0 - - self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary) - else: - self.w2l_decoder = None - - self.zero_infinity = cfg.zero_infinity - self.sentence_avg = cfg.sentence_avg - - def forward(self, model, sample, reduce=True): - net_output = model(**sample["net_input"]) - lprobs = model.get_normalized_probs( - net_output, log_probs=True - ).contiguous() # (T, B, C) from the encoder - - if "src_lengths" in sample["net_input"]: - input_lengths = sample["net_input"]["src_lengths"] - else: - if net_output["padding_mask"] is not None: - non_padding_mask = ~net_output["padding_mask"] - input_lengths = non_padding_mask.long().sum(-1) - else: - input_lengths = lprobs.new_full( - (lprobs.size(1),), lprobs.size(0), 
dtype=torch.long - ) - - pad_mask = (sample["target"] != self.pad_idx) & ( - sample["target"] != self.eos_idx - ) - targets_flat = sample["target"].masked_select(pad_mask) - if "target_lengths" in sample: - target_lengths = sample["target_lengths"] - else: - target_lengths = pad_mask.sum(-1) - - with torch.backends.cudnn.flags(enabled=False): - loss = F.ctc_loss( - lprobs, - targets_flat, - input_lengths, - target_lengths, - blank=self.blank_idx, - reduction="sum", - zero_infinity=self.zero_infinity, - ) - - ntokens = ( - sample["ntokens"] if "ntokens" in sample else target_lengths.sum().item() - ) - - sample_size = sample["target"].size(0) if self.sentence_avg else ntokens - logging_output = { - "loss": utils.item(loss.data), # * sample['ntokens'], - "ntokens": ntokens, - "nsentences": sample["id"].numel(), - "sample_size": sample_size, - } - - if not model.training: - import editdistance - - with torch.no_grad(): - lprobs_t = lprobs.transpose(0, 1).float().contiguous().cpu() - - c_err = 0 - c_len = 0 - w_errs = 0 - w_len = 0 - wv_errs = 0 - for lp, t, inp_l in zip( - lprobs_t, - sample["target_label"] - if "target_label" in sample - else sample["target"], - input_lengths, - ): - lp = lp[:inp_l].unsqueeze(0) - - decoded = None - if self.w2l_decoder is not None: - decoded = self.w2l_decoder.decode(lp) - if len(decoded) < 1: - decoded = None - else: - decoded = decoded[0] - if len(decoded) < 1: - decoded = None - else: - decoded = decoded[0] - - p = (t != self.task.target_dictionary.pad()) & ( - t != self.task.target_dictionary.eos() - ) - targ = t[p] - targ_units = self.task.target_dictionary.string(targ) - targ_units_arr = targ.tolist() - - toks = lp.argmax(dim=-1).unique_consecutive() - pred_units_arr = toks[toks != self.blank_idx].tolist() - - c_err += editdistance.eval(pred_units_arr, targ_units_arr) - c_len += len(targ_units_arr) - - targ_words = post_process(targ_units, self.post_process).split() - - pred_units = self.task.target_dictionary.string(pred_units_arr) - pred_words_raw = post_process(pred_units, self.post_process).split() - - if decoded is not None and "words" in decoded: - pred_words = decoded["words"] - w_errs += editdistance.eval(pred_words, targ_words) - wv_errs += editdistance.eval(pred_words_raw, targ_words) - else: - dist = editdistance.eval(pred_words_raw, targ_words) - w_errs += dist - wv_errs += dist - - w_len += len(targ_words) - - logging_output["wv_errors"] = wv_errs - logging_output["w_errors"] = w_errs - logging_output["w_total"] = w_len - logging_output["c_errors"] = c_err - logging_output["c_total"] = c_len - - return loss, sample_size, logging_output - - @staticmethod - def reduce_metrics(logging_outputs) -> None: - """Aggregate logging outputs from data parallel training.""" - - loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) - ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs)) - nsentences = utils.item( - sum(log.get("nsentences", 0) for log in logging_outputs) - ) - sample_size = utils.item( - sum(log.get("sample_size", 0) for log in logging_outputs) - ) - - metrics.log_scalar( - "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 - ) - metrics.log_scalar("ntokens", ntokens) - metrics.log_scalar("nsentences", nsentences) - if sample_size != ntokens: - metrics.log_scalar( - "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3 - ) - - c_errors = sum(log.get("c_errors", 0) for log in logging_outputs) - metrics.log_scalar("_c_errors", c_errors) - c_total = sum(log.get("c_total", 0) 
for log in logging_outputs) - metrics.log_scalar("_c_total", c_total) - w_errors = sum(log.get("w_errors", 0) for log in logging_outputs) - metrics.log_scalar("_w_errors", w_errors) - wv_errors = sum(log.get("wv_errors", 0) for log in logging_outputs) - metrics.log_scalar("_wv_errors", wv_errors) - w_total = sum(log.get("w_total", 0) for log in logging_outputs) - metrics.log_scalar("_w_total", w_total) - - if c_total > 0: - metrics.log_derived( - "uer", - lambda meters: safe_round( - meters["_c_errors"].sum * 100.0 / meters["_c_total"].sum, 3 - ) - if meters["_c_total"].sum > 0 - else float("nan"), - ) - if w_total > 0: - metrics.log_derived( - "wer", - lambda meters: safe_round( - meters["_w_errors"].sum * 100.0 / meters["_w_total"].sum, 3 - ) - if meters["_w_total"].sum > 0 - else float("nan"), - ) - metrics.log_derived( - "raw_wer", - lambda meters: safe_round( - meters["_wv_errors"].sum * 100.0 / meters["_w_total"].sum, 3 - ) - if meters["_w_total"].sum > 0 - else float("nan"), - ) - - @staticmethod - def logging_outputs_can_be_summed() -> bool: - """ - Whether the logging outputs returned by `forward` can be summed - across workers prior to calling `reduce_metrics`. Setting this - to True will improves distributed training speed. - """ - return True diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/encoders/space_tokenizer.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/encoders/space_tokenizer.py deleted file mode 100644 index 925ad41b7c1aee6738c63938c36bd3ee16dca812..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/data/encoders/space_tokenizer.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import re - -from fairseq.data.encoders import register_tokenizer -from fairseq.dataclass import FairseqDataclass - - -@register_tokenizer("space", dataclass=FairseqDataclass) -class SpaceTokenizer(object): - def __init__(self, *unused): - self.space_tok = re.compile(r"\s+") - - def encode(self, x: str) -> str: - return self.space_tok.sub(" ", x) - - def decode(self, x: str) -> str: - return x diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/speech_to_text.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/speech_to_text.py deleted file mode 100644 index 06e292103ef898d607eb23441ce840de1fc800a1..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/tasks/speech_to_text.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
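# Illustrative sketch (not part of the deleted fairseq sources above): the WER
# bookkeeping used in the CTC criterion's validation branch. Per-sentence edit
# distances and reference word counts are summed into _w_errors/_w_total, and
# reduce_metrics() later derives wer = 100 * errors / total via log_derived().
import editdistance

def accumulate_wer_counts(hyps, refs):
    # hyps/refs: already post-processed hypothesis/reference sentence strings
    w_errs, w_total = 0, 0
    for hyp, ref in zip(hyps, refs):
        pred_words = hyp.split()
        targ_words = ref.split()
        w_errs += editdistance.eval(pred_words, targ_words)
        w_total += len(targ_words)
    return w_errs, w_total

errs, total = accumulate_wer_counts(["a b d"], ["a b c"])
print(100.0 * errs / total if total > 0 else float("nan"))  # 33.33...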
- -import logging -from pathlib import Path -from argparse import Namespace - -from fairseq.data import Dictionary, encoders -from fairseq.data.audio.speech_to_text_dataset import ( - S2TDataConfig, - SpeechToTextDataset, - SpeechToTextDatasetCreator, - get_features_or_waveform -) -from fairseq.tasks import LegacyFairseqTask, register_task - - -logger = logging.getLogger(__name__) - - -@register_task("speech_to_text") -class SpeechToTextTask(LegacyFairseqTask): - @classmethod - def add_args(cls, parser): - parser.add_argument("data", help="manifest root path") - parser.add_argument( - "--config-yaml", - type=str, - default="config.yaml", - help="Configuration YAML filename (under manifest root)", - ) - parser.add_argument( - "--max-source-positions", - default=6000, - type=int, - metavar="N", - help="max number of tokens in the source sequence", - ) - parser.add_argument( - "--max-target-positions", - default=1024, - type=int, - metavar="N", - help="max number of tokens in the target sequence", - ) - - def __init__(self, args, tgt_dict): - super().__init__(args) - self.tgt_dict = tgt_dict - self.data_cfg = S2TDataConfig(Path(args.data) / args.config_yaml) - self.speaker_to_id = self._get_speaker_to_id() - - def _get_speaker_to_id(self): - speaker_to_id = None - speaker_set_filename = self.data_cfg.config.get("speaker_set_filename") - if speaker_set_filename is not None: - speaker_set_path = Path(self.args.data) / speaker_set_filename - with open(speaker_set_path) as f: - speaker_to_id = {r.strip(): i for i, r in enumerate(f)} - return speaker_to_id - - @classmethod - def setup_task(cls, args, **kwargs): - data_cfg = S2TDataConfig(Path(args.data) / args.config_yaml) - dict_path = Path(args.data) / data_cfg.vocab_filename - if not dict_path.is_file(): - raise FileNotFoundError(f"Dict not found: {dict_path.as_posix()}") - tgt_dict = Dictionary.load(dict_path.as_posix()) - logger.info( - f"dictionary size ({data_cfg.vocab_filename}): " f"{len(tgt_dict):,}" - ) - - if getattr(args, "train_subset", None) is not None: - if not all(s.startswith("train") for s in args.train_subset.split(",")): - raise ValueError('Train splits should be named like "train*".') - return cls(args, tgt_dict) - - def build_criterion(self, args): - from fairseq import criterions - - if self.data_cfg.prepend_tgt_lang_tag and args.ignore_prefix_size != 1: - raise ValueError( - 'Please set "--ignore-prefix-size 1" since ' - "target language ID token is prepended as BOS." 
- ) - return criterions.build_criterion(args, self) - - def load_dataset(self, split, epoch=1, combine=False, **kwargs): - is_train_split = split.startswith("train") - pre_tokenizer = self.build_tokenizer(self.args) - bpe_tokenizer = self.build_bpe(self.args) - self.datasets[split] = SpeechToTextDatasetCreator.from_tsv( - self.args.data, - self.data_cfg, - split, - self.tgt_dict, - pre_tokenizer, - bpe_tokenizer, - is_train_split=is_train_split, - epoch=epoch, - seed=self.args.seed, - speaker_to_id=self.speaker_to_id - ) - - @property - def target_dictionary(self): - return self.tgt_dict - - @property - def source_dictionary(self): - return None - - def max_positions(self): - return self.args.max_source_positions, self.args.max_target_positions - - def build_model(self, args): - args.input_feat_per_channel = self.data_cfg.input_feat_per_channel - args.input_channels = self.data_cfg.input_channels - args.speaker_to_id = self.speaker_to_id - return super(SpeechToTextTask, self).build_model(args) - - def build_generator( - self, - models, - args, - seq_gen_cls=None, - extra_gen_cls_kwargs=None, - ): - if self.data_cfg.prepend_tgt_lang_tag and args.prefix_size != 1: - raise ValueError( - 'Please set "--prefix-size 1" since ' - "target language ID token is prepended as BOS." - ) - lang_token_ids = { - i - for s, i in self.tgt_dict.indices.items() - if SpeechToTextDataset.is_lang_tag(s) - } - - if extra_gen_cls_kwargs is None: - extra_gen_cls_kwargs = {} - extra_gen_cls_kwargs["symbols_to_strip_from_output"] = lang_token_ids - return super().build_generator( - models, args, seq_gen_cls=None, - extra_gen_cls_kwargs=extra_gen_cls_kwargs - ) - - def build_tokenizer(self, args): - logger.info(f"pre-tokenizer: {self.data_cfg.pre_tokenizer}") - return encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer)) - - def build_bpe(self, args): - logger.info(f"tokenizer: {self.data_cfg.bpe_tokenizer}") - return encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer)) - - def get_interactive_tokens_and_lengths(self, lines, encode_fn): - n_frames = [get_features_or_waveform(p).shape[0] for p in lines] - return lines, n_frames - - def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs): - return SpeechToTextDataset( - "interactive", False, self.data_cfg, src_tokens, src_lengths - ) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/benchmark/dummy_model.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/benchmark/dummy_model.py deleted file mode 100644 index ff26e4fe655d8e8d7f9942c4bd3df7cd267405fb..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/benchmark/dummy_model.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
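# Illustrative sketch (not part of the deleted fairseq sources above): how the
# SpeechToTextTask arguments defined above are typically wired up outside of
# fairseq-train. The manifest path below is a placeholder; setup_task()
# additionally expects config.yaml and the vocabulary file it references to
# exist under that directory.
import argparse

parser = argparse.ArgumentParser()
SpeechToTextTask.add_args(parser)          # adds `data`, --config-yaml, max positions
args = parser.parse_args(["/path/to/manifest_root", "--config-yaml", "config.yaml"])

# With a real manifest directory in place, the task and a split could be built:
# task = SpeechToTextTask.setup_task(args)   # loads S2TDataConfig + Dictionary
# task.load_dataset("train")                 # SpeechToTextDatasetCreator.from_tsv(...)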
- -import torch.nn as nn -import torch.nn.functional as F -from fairseq.data import Dictionary -from fairseq.models import ( - FairseqDecoder, - FairseqLanguageModel, - register_model, - register_model_architecture, -) - - -@register_model("dummy_model") -class DummyModel(FairseqLanguageModel): - def __init__(self, args, encoder): - super().__init__(encoder) - self.args = args - - @staticmethod - def add_args(parser): - parser.add_argument("--num-layers", type=int, default=24) - parser.add_argument("--embed-dim", type=int, default=1024) - - @classmethod - def build_model(cls, args, task): - encoder = DummyEncoder( - num_embed=len(task.target_dictionary), - embed_dim=args.embed_dim, - num_layers=args.num_layers, - ) - return cls(args, encoder) - - def forward(self, src_tokens, masked_tokens=None, **kwargs): - return self.decoder(src_tokens, masked_tokens=masked_tokens) - - -class DummyEncoder(FairseqDecoder): - def __init__(self, num_embed=50000, embed_dim=1024, num_layers=24): - super().__init__(Dictionary()) - self.embed = nn.Embedding( - num_embeddings=num_embed, embedding_dim=embed_dim, padding_idx=0 - ) - self.layers_a = nn.ModuleList( - [ - nn.Sequential( - nn.LayerNorm(embed_dim), - nn.Linear(embed_dim, 3 * embed_dim), # q, k, v input projection - nn.Linear(3 * embed_dim, embed_dim), # skip self-attention - nn.Linear(embed_dim, embed_dim), # output projection - nn.Dropout(), - ) - for i in range(num_layers) - ] - ) - self.layers_b = nn.ModuleList( - [ - nn.Sequential( - nn.LayerNorm(embed_dim), - nn.Linear(embed_dim, 4 * embed_dim), # FFN - nn.ReLU(), - nn.Linear(4 * embed_dim, embed_dim), # FFN - nn.Dropout(0.1), - ) - for i in range(num_layers) - ] - ) - self.out_proj = nn.Linear(embed_dim, num_embed) - - def forward(self, tokens, masked_tokens=None): - x = self.embed(tokens) - for layer_a, layer_b in zip(self.layers_a, self.layers_b): - x = x + layer_a(x) - x = x + layer_b(x) - x = self.out_proj(x) - if masked_tokens is not None: - x = x[masked_tokens] - return (x,) - - def max_positions(self): - return 1024 - - def get_normalized_probs(self, net_output, log_probs, sample=None): - logits = net_output[0].float() - if log_probs: - return F.log_softmax(logits, dim=-1) - else: - return F.softmax(logits, dim=-1) - - -@register_model_architecture("dummy_model", "dummy_model") -def base_architecture(args): - pass diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/logging/meters.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/logging/meters.py deleted file mode 100644 index 2100b1fa0b2704b1c585f59e9349655bba0cc9e6..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/logging/meters.py +++ /dev/null @@ -1,323 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
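# Illustrative sketch (not part of the deleted fairseq sources above): the
# pre-norm residual pattern used by DummyEncoder, where each block's output is
# added back onto its input (x = x + layer(x)), so the benchmark model mostly
# measures raw matmul/LayerNorm throughput rather than modelling quality.
import torch
import torch.nn as nn

embed_dim = 16
ffn_block = nn.Sequential(
    nn.LayerNorm(embed_dim),
    nn.Linear(embed_dim, 4 * embed_dim),   # FFN expansion, as in layers_b
    nn.ReLU(),
    nn.Linear(4 * embed_dim, embed_dim),
)
x = torch.randn(2, 5, embed_dim)           # (batch, tokens, embed_dim)
x = x + ffn_block(x)                       # residual add; shape is unchanged
print(x.shape)                             # torch.Size([2, 5, 16])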
- -import bisect -import time -from collections import OrderedDict -from typing import Dict, Optional - - -try: - import torch - - def type_as(a, b): - if torch.is_tensor(a) and torch.is_tensor(b): - return a.to(b) - else: - return a - - -except ImportError: - torch = None - - def type_as(a, b): - return a - - -try: - import numpy as np -except ImportError: - np = None - - -class Meter(object): - """Base class for Meters.""" - - def __init__(self): - pass - - def state_dict(self): - return {} - - def load_state_dict(self, state_dict): - pass - - def reset(self): - raise NotImplementedError - - @property - def smoothed_value(self) -> float: - """Smoothed value used for logging.""" - raise NotImplementedError - - -def safe_round(number, ndigits): - if hasattr(number, "__round__"): - return round(number, ndigits) - elif torch is not None and torch.is_tensor(number) and number.numel() == 1: - return safe_round(number.item(), ndigits) - elif np is not None and np.ndim(number) == 0 and hasattr(number, "item"): - return safe_round(number.item(), ndigits) - else: - return number - - -class AverageMeter(Meter): - """Computes and stores the average and current value""" - - def __init__(self, round: Optional[int] = None): - self.round = round - self.reset() - - def reset(self): - self.val = None # most recent update - self.sum = 0 # sum from all updates - self.count = 0 # total n from all updates - - def update(self, val, n=1): - if val is not None: - self.val = val - if n > 0: - self.sum = type_as(self.sum, val) + (val * n) - self.count = type_as(self.count, n) + n - - def state_dict(self): - return { - "val": self.val, - "sum": self.sum, - "count": self.count, - "round": self.round, - } - - def load_state_dict(self, state_dict): - self.val = state_dict["val"] - self.sum = state_dict["sum"] - self.count = state_dict["count"] - self.round = state_dict.get("round", None) - - @property - def avg(self): - return self.sum / self.count if self.count > 0 else self.val - - @property - def smoothed_value(self) -> float: - val = self.avg - if self.round is not None and val is not None: - val = safe_round(val, self.round) - return val - - -class SumMeter(Meter): - """Computes and stores the sum""" - - def __init__(self, round: Optional[int] = None): - self.round = round - self.reset() - - def reset(self): - self.sum = 0 # sum from all updates - - def update(self, val): - if val is not None: - self.sum = type_as(self.sum, val) + val - - def state_dict(self): - return { - "sum": self.sum, - "round": self.round, - } - - def load_state_dict(self, state_dict): - self.sum = state_dict["sum"] - self.round = state_dict.get("round", None) - - @property - def smoothed_value(self) -> float: - val = self.sum - if self.round is not None and val is not None: - val = safe_round(val, self.round) - return val - - -class TimeMeter(Meter): - """Computes the average occurrence of some event per second""" - - def __init__( - self, - init: int = 0, - n: int = 0, - round: Optional[int] = None, - ): - self.round = round - self.reset(init, n) - - def reset(self, init=0, n=0): - self.init = init - self.start = time.perf_counter() - self.n = n - self.i = 0 - - def update(self, val=1): - self.n = type_as(self.n, val) + val - self.i += 1 - - def state_dict(self): - return { - "init": self.elapsed_time, - "n": self.n, - "round": self.round, - } - - def load_state_dict(self, state_dict): - if "start" in state_dict: - # backwards compatibility for old state_dicts - self.reset(init=state_dict["init"]) - else: - 
self.reset(init=state_dict["init"], n=state_dict["n"]) - self.round = state_dict.get("round", None) - - @property - def avg(self): - return self.n / self.elapsed_time - - @property - def elapsed_time(self): - return self.init + (time.perf_counter() - self.start) - - @property - def smoothed_value(self) -> float: - val = self.avg - if self.round is not None and val is not None: - val = safe_round(val, self.round) - return val - - -class StopwatchMeter(Meter): - """Computes the sum/avg duration of some event in seconds""" - - def __init__(self, round: Optional[int] = None): - self.round = round - self.sum = 0 - self.n = 0 - self.start_time = None - - def start(self): - self.start_time = time.perf_counter() - - def stop(self, n=1, prehook=None): - if self.start_time is not None: - if prehook is not None: - prehook() - delta = time.perf_counter() - self.start_time - self.sum = self.sum + delta - self.n = type_as(self.n, n) + n - - def reset(self): - self.sum = 0 # cumulative time during which stopwatch was active - self.n = 0 # total n across all start/stop - self.start() - - def state_dict(self): - return { - "sum": self.sum, - "n": self.n, - "round": self.round, - } - - def load_state_dict(self, state_dict): - self.sum = state_dict["sum"] - self.n = state_dict["n"] - self.start_time = None - self.round = state_dict.get("round", None) - - @property - def avg(self): - return self.sum / self.n if self.n > 0 else self.sum - - @property - def elapsed_time(self): - if self.start_time is None: - return 0.0 - return time.perf_counter() - self.start_time - - @property - def smoothed_value(self) -> float: - val = self.avg if self.sum > 0 else self.elapsed_time - if self.round is not None and val is not None: - val = safe_round(val, self.round) - return val - - -class MetersDict(OrderedDict): - """A sorted dictionary of :class:`Meters`. - - Meters are sorted according to a priority that is given when the - meter is first added to the dictionary. 
- """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.priorities = [] - - def __setitem__(self, key, value): - assert key not in self, "MetersDict doesn't support reassignment" - priority, value = value - bisect.insort(self.priorities, (priority, len(self.priorities), key)) - super().__setitem__(key, value) - for _, _, key in self.priorities: # reorder dict to match priorities - self.move_to_end(key) - - def add_meter(self, key, meter, priority): - self.__setitem__(key, (priority, meter)) - - def state_dict(self): - return [ - (pri, key, self[key].__class__.__name__, self[key].state_dict()) - for pri, _, key in self.priorities - # can't serialize DerivedMeter instances - if not isinstance(self[key], MetersDict._DerivedMeter) - ] - - def load_state_dict(self, state_dict): - self.clear() - self.priorities.clear() - for pri, key, meter_cls, meter_state in state_dict: - meter = globals()[meter_cls]() - meter.load_state_dict(meter_state) - self.add_meter(key, meter, pri) - - def get_smoothed_value(self, key: str) -> float: - """Get a single smoothed value.""" - meter = self[key] - if isinstance(meter, MetersDict._DerivedMeter): - return meter.fn(self) - else: - return meter.smoothed_value - - def get_smoothed_values(self) -> Dict[str, float]: - """Get all smoothed values.""" - return OrderedDict( - [ - (key, self.get_smoothed_value(key)) - for key in self.keys() - if not key.startswith("_") - ] - ) - - def reset(self): - """Reset Meter instances.""" - for meter in self.values(): - if isinstance(meter, MetersDict._DerivedMeter): - continue - meter.reset() - - class _DerivedMeter(Meter): - """A Meter whose values are derived from other Meters.""" - - def __init__(self, fn): - self.fn = fn - - def reset(self): - pass diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/fconv_lm.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/fconv_lm.py deleted file mode 100644 index 4b243d6669cb57880353b45a01843ec22010fb5f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/fconv_lm.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from fairseq import utils -from fairseq.models import ( - FairseqLanguageModel, - register_model, - register_model_architecture, -) -from fairseq.models.fconv import FConvDecoder -from fairseq.utils import safe_hasattr - - -@register_model("fconv_lm") -class FConvLanguageModel(FairseqLanguageModel): - def __init__(self, decoder): - super().__init__(decoder) - - @staticmethod - def add_args(parser): - """Add model-specific arguments to the parser.""" - parser.add_argument( - "--dropout", type=float, metavar="D", help="dropout probability" - ) - parser.add_argument( - "--decoder-embed-dim", - type=int, - metavar="N", - help="decoder embedding dimension", - ) - parser.add_argument( - "--decoder-layers", - type=str, - metavar="EXPR", - help="decoder layers [(dim, kernel_size), ...]", - ) - parser.add_argument( - "--decoder-out-embed-dim", - type=int, - metavar="N", - help="decoder output embedding dimension", - ) - parser.add_argument( - "--adaptive-softmax-cutoff", - metavar="EXPR", - help="comma separated list of adaptive softmax cutoff points. 
" - "Must be used with adaptive_loss criterion", - ) - parser.add_argument( - "--adaptive-softmax-dropout", - type=float, - metavar="D", - help="sets adaptive softmax dropout for the tail projections", - ) - parser.add_argument( - "--decoder-attention", - type=str, - metavar="EXPR", - help="decoder attention [True, ...]", - ) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - # make sure all arguments are present in older models - base_lm_architecture(args) - - if safe_hasattr(args, "max_target_positions") and not safe_hasattr( - args, "tokens_per_sample" - ): - args.tokens_per_sample = args.max_target_positions - - decoder = FConvDecoder( - dictionary=task.target_dictionary, - embed_dim=args.decoder_embed_dim, - convolutions=eval(args.decoder_layers), - out_embed_dim=args.decoder_embed_dim, - attention=eval(args.decoder_attention), - dropout=args.dropout, - max_positions=args.tokens_per_sample, - share_embed=False, - positional_embeddings=False, - adaptive_softmax_cutoff=( - utils.eval_str_list(args.adaptive_softmax_cutoff, type=int) - if args.criterion == "adaptive_loss" - else None - ), - adaptive_softmax_dropout=args.adaptive_softmax_dropout, - ) - return FConvLanguageModel(decoder) - - -@register_model_architecture("fconv_lm", "fconv_lm") -def base_lm_architecture(args): - args.dropout = getattr(args, "dropout", 0.1) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) - args.decoder_layers = getattr(args, "decoder_layers", "[(1268, 4)] * 13") - args.decoder_attention = getattr(args, "decoder_attention", "False") - args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) - args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) - - -@register_model_architecture("fconv_lm", "fconv_lm_dauphin_wikitext103") -def fconv_lm_dauphin_wikitext103(args): - layers = "[(850, 6)] * 3" - layers += " + [(850, 1)] * 1" - layers += " + [(850, 5)] * 4" - layers += " + [(850, 1)] * 1" - layers += " + [(850, 4)] * 3" - layers += " + [(1024, 4)] * 1" - layers += " + [(2048, 4)] * 1" - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 280) - args.decoder_layers = getattr(args, "decoder_layers", layers) - args.decoder_attention = getattr(args, "decoder_attention", "False") - args.adaptive_softmax_cutoff = getattr( - args, "adaptive_softmax_cutoff", "10000,20000,200000" - ) - base_lm_architecture(args) - - -@register_model_architecture("fconv_lm", "fconv_lm_dauphin_gbw") -def fconv_lm_dauphin_gbw(args): - layers = "[(512, 5)]" - layers += " + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3" - layers += " + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3" - layers += " + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6" - layers += " + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]" - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128) - args.decoder_layers = getattr(args, "decoder_layers", layers) - args.decoder_attention = getattr(args, "decoder_attention", "False") - args.adaptive_softmax_cutoff = getattr( - args, "adaptive_softmax_cutoff", "10000,50000,200000" - ) - base_lm_architecture(args) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/conv_tbc.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/conv_tbc.py deleted file mode 100644 index 65e17ec94f7e595cb657b3d2daaa1052a95d0677..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/modules/conv_tbc.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -from torch import nn -from torch.nn.modules.utils import _single -from torch import Tensor - - -class ConvTBC(torch.nn.Module): - """1D convolution over an input of shape (time x batch x channel) - - The implementation uses gemm to perform the convolution. This implementation - is faster than cuDNN for small kernel sizes. - """ - - def __init__(self, in_channels, out_channels, kernel_size, padding=0): - super(ConvTBC, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = _single(kernel_size) - self.padding = _single(padding) - - self.weight = torch.nn.Parameter( - torch.Tensor(self.kernel_size[0], in_channels, out_channels) - ) - self.bias = torch.nn.Parameter(torch.Tensor(out_channels)) - - self.reset_parameters() - - def reset_parameters(self): - nn.init.xavier_normal_(self.weight) - nn.init.zeros_(self.bias) - - def conv_tbc(self, input: Tensor): - return torch.conv_tbc( - input.contiguous(), self.weight, self.bias, self.padding[0] - ) - - def forward(self, input: Tensor): - return self.conv_tbc(input) - - def __repr__(self): - s = ( - "{name}({in_channels}, {out_channels}, kernel_size={kernel_size}" - ", padding={padding}" - ) - if self.bias is None: - s += ", bias=False" - s += ")" - return s.format(name=self.__class__.__name__, **self.__dict__) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/lr_scheduler/polynomial_decay_schedule.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/lr_scheduler/polynomial_decay_schedule.py deleted file mode 100644 index 73c3c8ea3435d6050401c45e737e4ecf5662825c..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/optim/lr_scheduler/polynomial_decay_schedule.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
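# Illustrative sketch (not part of the deleted fairseq sources above): using
# the ConvTBC module defined earlier. Note the input layout its docstring
# documents: (time, batch, channel), unlike nn.Conv1d's (batch, channel, time).
# Assumes the ConvTBC class above is in scope and that the local PyTorch build
# provides torch.conv_tbc.
import torch

conv = ConvTBC(in_channels=8, out_channels=16, kernel_size=3, padding=1)
x = torch.randn(20, 4, 8)      # (T=20, B=4, C_in=8)
y = conv(x)                    # padding=1 with kernel_size=3 keeps T=20
print(y.shape)                 # torch.Size([20, 4, 16])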
- -from dataclasses import dataclass, field -from typing import Optional, List -from omegaconf import II - -from fairseq.dataclass import FairseqDataclass -from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler - - -@dataclass -class PolynomialDecayLRScheduleConfig(FairseqDataclass): - warmup_updates: int = field( - default=0, - metadata={"help": "warmup the learning rate linearly for the first N updates"}, - ) - warmup_ratio: float = field( - default=0, - metadata={"help": "warmup ratio"}, - ) - force_anneal: Optional[int] = field( - default=None, - metadata={"help": "force annealing at specified epoch"}, - ) - end_learning_rate: float = field( - default=0.0, - metadata={"help": "learning rate to decay to"}, - ) - power: float = field( - default=1.0, - metadata={"help": "decay exponent"}, - ) - total_num_update: Optional[float] = field( - default=1000000, - metadata={"help": "total number of updates over which to decay learning rate"}, - ) - lr: List[float] = II("optimization.lr") - - -@register_lr_scheduler("polynomial_decay", dataclass=PolynomialDecayLRScheduleConfig) -class PolynomialDecayLRSchedule(FairseqLRScheduler): - """Decay the LR on a fixed schedule.""" - - def __init__(self, cfg: PolynomialDecayLRScheduleConfig, optimizer): - super().__init__(cfg, optimizer) - - assert cfg.total_num_update > 0 - # set defaults - cfg.warmup_updates = getattr(cfg, 'warmup_updates', 0) or 0 - - self.lr = cfg.lr[0] - self.warmup_updates = cfg.warmup_updates - if self.warmup_updates > 0: - self.warmup_factor = 1.0 / self.warmup_updates - else: - self.warmup_factor = 1 - self.end_learning_rate = cfg.end_learning_rate - self.total_num_update = cfg.total_num_update - self.power = cfg.power - self.optimizer.set_lr(self.warmup_factor * self.lr) - - def get_next_lr(self, epoch): - lrs = self.cfg.lr - if self.cfg.force_anneal is None or epoch < self.cfg.force_anneal: - # use fixed LR schedule - next_lr = lrs[min(epoch, len(lrs) - 1)] - else: - # annneal based on lr_shrink - next_lr = self.optimizer.get_lr() - return next_lr - - def step_begin_epoch(self, epoch): - """Update the learning rate at the beginning of the given epoch.""" - self.lr = self.get_next_lr(epoch) - self.optimizer.set_lr(self.warmup_factor * self.lr) - return self.optimizer.get_lr() - - def step_update(self, num_updates): - """Update the learning rate after each update.""" - if self.warmup_updates > 0 and num_updates <= self.warmup_updates: - self.warmup_factor = num_updates / float(self.warmup_updates) - lr = self.warmup_factor * self.lr - elif num_updates >= self.total_num_update: - lr = self.end_learning_rate - else: - warmup = self.warmup_updates - lr_range = self.lr - self.end_learning_rate - pct_remaining = 1 - (num_updates - warmup) / (self.total_num_update - warmup) - lr = lr_range * pct_remaining ** (self.power) + self.end_learning_rate - self.optimizer.set_lr(lr) - return self.optimizer.get_lr() - - def reinit(self, total_num_update, num_updates): - # only enable this when set warmup_ratio - if self.cfg.warmup_ratio <= 0: - return - # re init this according to the real number of updates - self.total_num_update = total_num_update - self.warmup_updates = int(self.total_num_update * self.cfg.warmup_ratio) - if num_updates > 0: - self.warmup_factor = min(1.0, num_updates / float(self.warmup_updates)) - self.step_update(num_updates) - else: - self.warmup_factor = 1.0 / self.warmup_updates - self.optimizer.set_lr(self.warmup_factor * self.lr) - print('Total steps {}, warmup steps {}, warmup_factor 
{}'.format(self.total_num_update, self.warmup_updates, - self.warmup_factor)) \ No newline at end of file diff --git a/spaces/OIUGLK/bingo/src/components/welcome-screen.tsx b/spaces/OIUGLK/bingo/src/components/welcome-screen.tsx deleted file mode 100644 index f7449fcbb6c621875e235db98f2790bf7894fb0a..0000000000000000000000000000000000000000 --- a/spaces/OIUGLK/bingo/src/components/welcome-screen.tsx +++ /dev/null @@ -1,34 +0,0 @@ -import { useBing } from '@/lib/hooks/use-bing' - -const exampleMessages = [ - { - heading: '🧐 提出复杂问题', - message: `我可以为我挑剔的只吃橙色食物的孩子做什么饭?` - }, - { - heading: '🙌 获取更好的答案', - message: '销量最高的 3 种宠物吸尘器有哪些优点和缺点?' - }, - { - heading: '🎨 获得创意灵感', - message: `以海盗的口吻写一首关于外太空鳄鱼的俳句` - } -] - -export function WelcomeScreen({ setInput }: Pick, 'setInput'>) { - return ( -
-       <div> -         {exampleMessages.map(example => ( -           <button key={example.heading} onClick={() => setInput(example.message)}> -             <div>{example.heading}</div> -             <div>{example.message}</div> -           </button> -         ))} -       </div>
      - ) -} diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp deleted file mode 100644 index c843487b5fa4e8077dd27402ec99009266ddda8d..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) Facebook, Inc. and its affiliates. -#include "box_iou_rotated.h" -#include "box_iou_rotated_utils.h" - -namespace detectron2 { - -template -void box_iou_rotated_cpu_kernel( - const at::Tensor& boxes1, - const at::Tensor& boxes2, - at::Tensor& ious) { - auto num_boxes1 = boxes1.size(0); - auto num_boxes2 = boxes2.size(0); - - for (int i = 0; i < num_boxes1; i++) { - for (int j = 0; j < num_boxes2; j++) { - ious[i * num_boxes2 + j] = single_box_iou_rotated( - boxes1[i].data_ptr(), boxes2[j].data_ptr()); - } - } -} - -at::Tensor box_iou_rotated_cpu( - // input must be contiguous: - const at::Tensor& boxes1, - const at::Tensor& boxes2) { - auto num_boxes1 = boxes1.size(0); - auto num_boxes2 = boxes2.size(0); - at::Tensor ious = - at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat)); - - box_iou_rotated_cpu_kernel(boxes1, boxes2, ious); - - // reshape from 1d array to 2d array - auto shape = std::vector{num_boxes1, num_boxes2}; - return ious.reshape(shape); -} - -} // namespace detectron2 diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/bifpn.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/bifpn.py deleted file mode 100644 index 565e2940ad0e4c43ec2172d4a79a9bd72adef09e..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/backbone/bifpn.py +++ /dev/null @@ -1,425 +0,0 @@ -# Modified from https://github.com/rwightman/efficientdet-pytorch/blob/master/effdet/efficientdet.py -# The original file is under Apache-2.0 License -import math -from os.path import join -import numpy as np -from collections import OrderedDict -from typing import List - -import torch -from torch import nn -import torch.utils.model_zoo as model_zoo -import torch.nn.functional as F -import fvcore.nn.weight_init as weight_init - -from detectron2.layers import ShapeSpec, Conv2d -from detectron2.modeling.backbone.resnet import build_resnet_backbone -from detectron2.modeling.backbone.build import BACKBONE_REGISTRY -from detectron2.layers.batch_norm import get_norm -from detectron2.modeling.backbone import Backbone -from .dlafpn import dla34 - -def get_fpn_config(base_reduction=8): - """BiFPN config with sum.""" - p = { - 'nodes': [ - {'reduction': base_reduction << 3, 'inputs_offsets': [3, 4]}, - {'reduction': base_reduction << 2, 'inputs_offsets': [2, 5]}, - {'reduction': base_reduction << 1, 'inputs_offsets': [1, 6]}, - {'reduction': base_reduction, 'inputs_offsets': [0, 7]}, - {'reduction': base_reduction << 1, 'inputs_offsets': [1, 7, 8]}, - {'reduction': base_reduction << 2, 'inputs_offsets': [2, 6, 9]}, - {'reduction': base_reduction << 3, 'inputs_offsets': [3, 5, 10]}, - {'reduction': base_reduction << 4, 'inputs_offsets': [4, 11]}, - ], - 'weight_method': 
'fastattn', - } - return p - - -def swish(x, inplace: bool = False): - """Swish - Described in: https://arxiv.org/abs/1710.05941 - """ - return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid()) - - -class Swish(nn.Module): - def __init__(self, inplace: bool = False): - super(Swish, self).__init__() - self.inplace = inplace - - def forward(self, x): - return swish(x, self.inplace) - - -class SequentialAppend(nn.Sequential): - def __init__(self, *args): - super(SequentialAppend, self).__init__(*args) - - def forward(self, x): - for module in self: - x.append(module(x)) - return x - - -class SequentialAppendLast(nn.Sequential): - def __init__(self, *args): - super(SequentialAppendLast, self).__init__(*args) - - # def forward(self, x: List[torch.Tensor]): - def forward(self, x): - for module in self: - x.append(module(x[-1])) - return x - - -class ConvBnAct2d(nn.Module): - def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, padding='', bias=False, - norm='', act_layer=Swish): - super(ConvBnAct2d, self).__init__() - # self.conv = create_conv2d( - # in_channels, out_channels, kernel_size, stride=stride, dilation=dilation, padding=padding, bias=bias) - self.conv = Conv2d( - in_channels, out_channels, kernel_size=kernel_size, stride=stride, - padding=kernel_size // 2, bias=(norm == '')) - self.bn = get_norm(norm, out_channels) - self.act = None if act_layer is None else act_layer(inplace=True) - - def forward(self, x): - x = self.conv(x) - if self.bn is not None: - x = self.bn(x) - if self.act is not None: - x = self.act(x) - return x - - -class SeparableConv2d(nn.Module): - """ Separable Conv - """ - def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False, - channel_multiplier=1.0, pw_kernel_size=1, act_layer=Swish, - norm=''): - super(SeparableConv2d, self).__init__() - - # self.conv_dw = create_conv2d( - # in_channels, int(in_channels * channel_multiplier), kernel_size, - # stride=stride, dilation=dilation, padding=padding, depthwise=True) - - self.conv_dw = Conv2d( - in_channels, int(in_channels * channel_multiplier), - kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=bias, - groups=out_channels) - # print('conv_dw', kernel_size, stride) - # self.conv_pw = create_conv2d( - # int(in_channels * channel_multiplier), out_channels, pw_kernel_size, padding=padding, bias=bias) - - self.conv_pw = Conv2d( - int(in_channels * channel_multiplier), out_channels, - kernel_size=pw_kernel_size, padding=pw_kernel_size // 2, bias=(norm=='')) - # print('conv_pw', pw_kernel_size) - - self.bn = get_norm(norm, out_channels) - self.act = None if act_layer is None else act_layer(inplace=True) - - def forward(self, x): - x = self.conv_dw(x) - x = self.conv_pw(x) - if self.bn is not None: - x = self.bn(x) - if self.act is not None: - x = self.act(x) - return x - - -class ResampleFeatureMap(nn.Sequential): - def __init__(self, in_channels, out_channels, reduction_ratio=1., pad_type='', pooling_type='max', - norm='', apply_bn=False, conv_after_downsample=False, - redundant_bias=False): - super(ResampleFeatureMap, self).__init__() - pooling_type = pooling_type or 'max' - self.in_channels = in_channels - self.out_channels = out_channels - self.reduction_ratio = reduction_ratio - self.conv_after_downsample = conv_after_downsample - - conv = None - if in_channels != out_channels: - conv = ConvBnAct2d( - in_channels, out_channels, kernel_size=1, padding=pad_type, - norm=norm if apply_bn else '', - bias=not apply_bn or 
redundant_bias, act_layer=None) - - if reduction_ratio > 1: - stride_size = int(reduction_ratio) - if conv is not None and not self.conv_after_downsample: - self.add_module('conv', conv) - self.add_module( - 'downsample', - # create_pool2d( - # pooling_type, kernel_size=stride_size + 1, stride=stride_size, padding=pad_type) - # nn.MaxPool2d(kernel_size=stride_size + 1, stride=stride_size, padding=pad_type) - nn.MaxPool2d(kernel_size=stride_size, stride=stride_size) - ) - if conv is not None and self.conv_after_downsample: - self.add_module('conv', conv) - else: - if conv is not None: - self.add_module('conv', conv) - if reduction_ratio < 1: - scale = int(1 // reduction_ratio) - self.add_module('upsample', nn.UpsamplingNearest2d(scale_factor=scale)) - - -class FpnCombine(nn.Module): - def __init__(self, feature_info, fpn_config, fpn_channels, inputs_offsets, target_reduction, pad_type='', - pooling_type='max', norm='', apply_bn_for_resampling=False, - conv_after_downsample=False, redundant_bias=False, weight_method='attn'): - super(FpnCombine, self).__init__() - self.inputs_offsets = inputs_offsets - self.weight_method = weight_method - - self.resample = nn.ModuleDict() - for idx, offset in enumerate(inputs_offsets): - in_channels = fpn_channels - if offset < len(feature_info): - in_channels = feature_info[offset]['num_chs'] - input_reduction = feature_info[offset]['reduction'] - else: - node_idx = offset - len(feature_info) - # print('node_idx, len', node_idx, len(fpn_config['nodes'])) - input_reduction = fpn_config['nodes'][node_idx]['reduction'] - reduction_ratio = target_reduction / input_reduction - self.resample[str(offset)] = ResampleFeatureMap( - in_channels, fpn_channels, reduction_ratio=reduction_ratio, pad_type=pad_type, - pooling_type=pooling_type, norm=norm, - apply_bn=apply_bn_for_resampling, conv_after_downsample=conv_after_downsample, - redundant_bias=redundant_bias) - - if weight_method == 'attn' or weight_method == 'fastattn': - # WSM - self.edge_weights = nn.Parameter(torch.ones(len(inputs_offsets)), requires_grad=True) - else: - self.edge_weights = None - - def forward(self, x): - dtype = x[0].dtype - nodes = [] - for offset in self.inputs_offsets: - input_node = x[offset] - input_node = self.resample[str(offset)](input_node) - nodes.append(input_node) - - if self.weight_method == 'attn': - normalized_weights = torch.softmax(self.edge_weights.type(dtype), dim=0) - x = torch.stack(nodes, dim=-1) * normalized_weights - elif self.weight_method == 'fastattn': - edge_weights = nn.functional.relu(self.edge_weights.type(dtype)) - weights_sum = torch.sum(edge_weights) - x = torch.stack( - [(nodes[i] * edge_weights[i]) / (weights_sum + 0.0001) for i in range(len(nodes))], dim=-1) - elif self.weight_method == 'sum': - x = torch.stack(nodes, dim=-1) - else: - raise ValueError('unknown weight_method {}'.format(self.weight_method)) - x = torch.sum(x, dim=-1) - return x - - -class BiFpnLayer(nn.Module): - def __init__(self, feature_info, fpn_config, fpn_channels, num_levels=5, pad_type='', - pooling_type='max', norm='', act_layer=Swish, - apply_bn_for_resampling=False, conv_after_downsample=True, conv_bn_relu_pattern=False, - separable_conv=True, redundant_bias=False): - super(BiFpnLayer, self).__init__() - self.fpn_config = fpn_config - self.num_levels = num_levels - self.conv_bn_relu_pattern = False - - self.feature_info = [] - self.fnode = SequentialAppend() - for i, fnode_cfg in enumerate(fpn_config['nodes']): - # logging.debug('fnode {} : {}'.format(i, fnode_cfg)) - # 
print('fnode {} : {}'.format(i, fnode_cfg)) - fnode_layers = OrderedDict() - - # combine features - reduction = fnode_cfg['reduction'] - fnode_layers['combine'] = FpnCombine( - feature_info, fpn_config, fpn_channels, fnode_cfg['inputs_offsets'], target_reduction=reduction, - pad_type=pad_type, pooling_type=pooling_type, norm=norm, - apply_bn_for_resampling=apply_bn_for_resampling, conv_after_downsample=conv_after_downsample, - redundant_bias=redundant_bias, weight_method=fpn_config['weight_method']) - self.feature_info.append(dict(num_chs=fpn_channels, reduction=reduction)) - - # after combine ops - after_combine = OrderedDict() - if not conv_bn_relu_pattern: - after_combine['act'] = act_layer(inplace=True) - conv_bias = redundant_bias - conv_act = None - else: - conv_bias = False - conv_act = act_layer - conv_kwargs = dict( - in_channels=fpn_channels, out_channels=fpn_channels, kernel_size=3, padding=pad_type, - bias=conv_bias, norm=norm, act_layer=conv_act) - after_combine['conv'] = SeparableConv2d(**conv_kwargs) if separable_conv else ConvBnAct2d(**conv_kwargs) - fnode_layers['after_combine'] = nn.Sequential(after_combine) - - self.fnode.add_module(str(i), nn.Sequential(fnode_layers)) - - self.feature_info = self.feature_info[-num_levels::] - - def forward(self, x): - x = self.fnode(x) - return x[-self.num_levels::] - - -class BiFPN(Backbone): - def __init__( - self, cfg, bottom_up, in_features, out_channels, norm='', - num_levels=5, num_bifpn=4, separable_conv=False, - ): - super(BiFPN, self).__init__() - assert isinstance(bottom_up, Backbone) - - # Feature map strides and channels from the bottom up network (e.g. ResNet) - input_shapes = bottom_up.output_shape() - in_strides = [input_shapes[f].stride for f in in_features] - in_channels = [input_shapes[f].channels for f in in_features] - - self.num_levels = num_levels - self.num_bifpn = num_bifpn - self.bottom_up = bottom_up - self.in_features = in_features - self._size_divisibility = 128 - levels = [int(math.log2(s)) for s in in_strides] - self._out_feature_strides = { - "p{}".format(int(math.log2(s))): s for s in in_strides} - if len(in_features) < num_levels: - for l in range(num_levels - len(in_features)): - s = l + levels[-1] - self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1) - self._out_features = list(sorted(self._out_feature_strides.keys())) - self._out_feature_channels = {k: out_channels for k in self._out_features} - - # print('self._out_feature_strides', self._out_feature_strides) - # print('self._out_feature_channels', self._out_feature_channels) - - feature_info = [ - {'num_chs': in_channels[level], 'reduction': in_strides[level]} \ - for level in range(len(self.in_features)) - ] - # self.config = config - fpn_config = get_fpn_config() - self.resample = SequentialAppendLast() - for level in range(num_levels): - if level < len(feature_info): - in_chs = in_channels[level] # feature_info[level]['num_chs'] - reduction = in_strides[level] # feature_info[level]['reduction'] - else: - # Adds a coarser level by downsampling the last feature map - reduction_ratio = 2 - self.resample.add_module(str(level), ResampleFeatureMap( - in_channels=in_chs, - out_channels=out_channels, - pad_type='same', - pooling_type=None, - norm=norm, - reduction_ratio=reduction_ratio, - apply_bn=True, - conv_after_downsample=False, - redundant_bias=False, - )) - in_chs = out_channels - reduction = int(reduction * reduction_ratio) - feature_info.append(dict(num_chs=in_chs, reduction=reduction)) - - self.cell = nn.Sequential() - for rep in 
range(self.num_bifpn): - # logging.debug('building cell {}'.format(rep)) - # print('building cell {}'.format(rep)) - fpn_layer = BiFpnLayer( - feature_info=feature_info, - fpn_config=fpn_config, - fpn_channels=out_channels, - num_levels=self.num_levels, - pad_type='same', - pooling_type=None, - norm=norm, - act_layer=Swish, - separable_conv=separable_conv, - apply_bn_for_resampling=True, - conv_after_downsample=False, - conv_bn_relu_pattern=False, - redundant_bias=False, - ) - self.cell.add_module(str(rep), fpn_layer) - feature_info = fpn_layer.feature_info - # import pdb; pdb.set_trace() - - @property - def size_divisibility(self): - return self._size_divisibility - - def forward(self, x): - # print('input shapes', x.shape) - bottom_up_features = self.bottom_up(x) - x = [bottom_up_features[f] for f in self.in_features] - assert len(self.resample) == self.num_levels - len(x) - x = self.resample(x) - shapes = [xx.shape for xx in x] - # print('resample shapes', shapes) - x = self.cell(x) - out = {f: xx for f, xx in zip(self._out_features, x)} - # import pdb; pdb.set_trace() - return out - - -@BACKBONE_REGISTRY.register() -def build_resnet_bifpn_backbone(cfg, input_shape: ShapeSpec): - """ - Args: - cfg: a detectron2 CfgNode - - Returns: - backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. - """ - bottom_up = build_resnet_backbone(cfg, input_shape) - in_features = cfg.MODEL.FPN.IN_FEATURES - backbone = BiFPN( - cfg=cfg, - bottom_up=bottom_up, - in_features=in_features, - out_channels=cfg.MODEL.BIFPN.OUT_CHANNELS, - norm=cfg.MODEL.BIFPN.NORM, - num_levels=cfg.MODEL.BIFPN.NUM_LEVELS, - num_bifpn=cfg.MODEL.BIFPN.NUM_BIFPN, - separable_conv=cfg.MODEL.BIFPN.SEPARABLE_CONV, - ) - return backbone - -@BACKBONE_REGISTRY.register() -def build_p37_dla_bifpn_backbone(cfg, input_shape: ShapeSpec): - """ - Args: - cfg: a detectron2 CfgNode - Returns: - backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. - """ - bottom_up = dla34(cfg) - in_features = cfg.MODEL.FPN.IN_FEATURES - assert cfg.MODEL.BIFPN.NUM_LEVELS == 5 - - backbone = BiFPN( - cfg=cfg, - bottom_up=bottom_up, - in_features=in_features, - out_channels=cfg.MODEL.BIFPN.OUT_CHANNELS, - norm=cfg.MODEL.BIFPN.NORM, - num_levels=cfg.MODEL.BIFPN.NUM_LEVELS, - num_bifpn=cfg.MODEL.BIFPN.NUM_BIFPN, - separable_conv=cfg.MODEL.BIFPN.SEPARABLE_CONV, - ) - return backbone diff --git a/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/renderer.py b/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/renderer.py deleted file mode 100644 index 5ae14c5cdb1785226a52ae6b71b08f01de069962..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/pyrender/pyrender/renderer.py +++ /dev/null @@ -1,1339 +0,0 @@ -"""PBR renderer for Python. - -Author: Matthew Matl -""" -import sys - -import numpy as np -import PIL - -from .constants import (RenderFlags, TextAlign, GLTF, BufFlags, TexFlags, - ProgramFlags, DEFAULT_Z_FAR, DEFAULT_Z_NEAR, - SHADOW_TEX_SZ, MAX_N_LIGHTS) -from .shader_program import ShaderProgramCache -from .material import MetallicRoughnessMaterial, SpecularGlossinessMaterial -from .light import PointLight, SpotLight, DirectionalLight -from .font import FontCache -from .utils import format_color_vector - -from OpenGL.GL import * - - -class Renderer(object): - """Class for handling all rendering operations on a scene. - - Note - ---- - This renderer relies on the existence of an OpenGL context and - does not create one on its own. 
- - Parameters - ---------- - viewport_width : int - Width of the viewport in pixels. - viewport_height : int - Width of the viewport height in pixels. - point_size : float, optional - Size of points in pixels. Defaults to 1.0. - """ - - def __init__(self, viewport_width, viewport_height, point_size=1.0): - self.dpscale = 1 - # Scaling needed on retina displays - if sys.platform == 'darwin': - self.dpscale = 2 - - self.viewport_width = viewport_width - self.viewport_height = viewport_height - self.point_size = point_size - - # Optional framebuffer for offscreen renders - self._main_fb = None - self._main_cb = None - self._main_db = None - self._main_fb_ms = None - self._main_cb_ms = None - self._main_db_ms = None - self._main_fb_dims = (None, None) - self._shadow_fb = None - self._latest_znear = DEFAULT_Z_NEAR - self._latest_zfar = DEFAULT_Z_FAR - - # Shader Program Cache - self._program_cache = ShaderProgramCache() - self._font_cache = FontCache() - self._meshes = set() - self._mesh_textures = set() - self._shadow_textures = set() - self._texture_alloc_idx = 0 - - @property - def viewport_width(self): - """int : The width of the main viewport, in pixels. - """ - return self._viewport_width - - @viewport_width.setter - def viewport_width(self, value): - self._viewport_width = self.dpscale * value - - @property - def viewport_height(self): - """int : The height of the main viewport, in pixels. - """ - return self._viewport_height - - @viewport_height.setter - def viewport_height(self, value): - self._viewport_height = self.dpscale * value - - @property - def point_size(self): - """float : The size of screen-space points, in pixels. - """ - return self._point_size - - @point_size.setter - def point_size(self, value): - self._point_size = float(value) - - def render(self, scene, flags, seg_node_map=None): - """Render a scene with the given set of flags. - - Parameters - ---------- - scene : :class:`Scene` - A scene to render. - flags : int - A specification from :class:`.RenderFlags`. - seg_node_map : dict - A map from :class:`.Node` objects to (3,) colors for each. - If specified along with flags set to :attr:`.RenderFlags.SEG`, - the color image will be a segmentation image. - - Returns - ------- - color_im : (h, w, 3) uint8 or (h, w, 4) uint8 - If :attr:`RenderFlags.OFFSCREEN` is set, the color buffer. This is - normally an RGB buffer, but if :attr:`.RenderFlags.RGBA` is set, - the buffer will be a full RGBA buffer. - depth_im : (h, w) float32 - If :attr:`RenderFlags.OFFSCREEN` is set, the depth buffer - in linear units. 
- """ - # Update context with meshes and textures - self._update_context(scene, flags) - - # Render necessary shadow maps - if not bool(flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.SEG): - for ln in scene.light_nodes: - take_pass = False - if (isinstance(ln.light, DirectionalLight) and - bool(flags & RenderFlags.SHADOWS_DIRECTIONAL)): - take_pass = True - elif (isinstance(ln.light, SpotLight) and - bool(flags & RenderFlags.SHADOWS_SPOT)): - take_pass = True - elif (isinstance(ln.light, PointLight) and - bool(flags & RenderFlags.SHADOWS_POINT)): - take_pass = True - if take_pass: - self._shadow_mapping_pass(scene, ln, flags) - - # Make forward pass - retval = self._forward_pass(scene, flags, seg_node_map=seg_node_map) - - # If necessary, make normals pass - if flags & (RenderFlags.VERTEX_NORMALS | RenderFlags.FACE_NORMALS): - self._normals_pass(scene, flags) - - # Update camera settings for retrieving depth buffers - self._latest_znear = scene.main_camera_node.camera.znear - self._latest_zfar = scene.main_camera_node.camera.zfar - - return retval - - def render_text(self, text, x, y, font_name='OpenSans-Regular', - font_pt=40, color=None, scale=1.0, - align=TextAlign.BOTTOM_LEFT): - """Render text into the current viewport. - - Note - ---- - This cannot be done into an offscreen buffer. - - Parameters - ---------- - text : str - The text to render. - x : int - Horizontal pixel location of text. - y : int - Vertical pixel location of text. - font_name : str - Name of font, from the ``pyrender/fonts`` folder, or - a path to a ``.ttf`` file. - font_pt : int - Height of the text, in font points. - color : (4,) float - The color of the text. Default is black. - scale : int - Scaling factor for text. - align : int - One of the :class:`TextAlign` options which specifies where the - ``x`` and ``y`` parameters lie on the text. For example, - :attr:`TextAlign.BOTTOM_LEFT` means that ``x`` and ``y`` indicate - the position of the bottom-left corner of the textbox. - """ - x *= self.dpscale - y *= self.dpscale - font_pt *= self.dpscale - - if color is None: - color = np.array([0.0, 0.0, 0.0, 1.0]) - else: - color = format_color_vector(color, 4) - - # Set up viewport for render - self._configure_forward_pass_viewport(0) - - # Load font - font = self._font_cache.get_font(font_name, font_pt) - if not font._in_context(): - font._add_to_context() - - # Load program - program = self._get_text_program() - program._bind() - - # Set uniforms - p = np.eye(4) - p[0,0] = 2.0 / self.viewport_width - p[0,3] = -1.0 - p[1,1] = 2.0 / self.viewport_height - p[1,3] = -1.0 - program.set_uniform('projection', p) - program.set_uniform('text_color', color) - - # Draw text - font.render_string(text, x, y, scale, align) - - def read_color_buf(self): - """Read and return the current viewport's color buffer. - - Alpha cannot be computed for an on-screen buffer. - - Returns - ------- - color_im : (h, w, 3) uint8 - The color buffer in RGB byte format. 
- """ - # Extract color image from frame buffer - width, height = self.viewport_width, self.viewport_height - glBindFramebuffer(GL_READ_FRAMEBUFFER, 0) - glReadBuffer(GL_FRONT) - color_buf = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE) - - # Re-format them into numpy arrays - color_im = np.frombuffer(color_buf, dtype=np.uint8) - color_im = color_im.reshape((height, width, 3)) - color_im = np.flip(color_im, axis=0) - - # Resize for macos if needed - if sys.platform == 'darwin': - color_im = self._resize_image(color_im, True) - - return color_im - - def read_depth_buf(self): - """Read and return the current viewport's color buffer. - - Returns - ------- - depth_im : (h, w) float32 - The depth buffer in linear units. - """ - width, height = self.viewport_width, self.viewport_height - glBindFramebuffer(GL_READ_FRAMEBUFFER, 0) - glReadBuffer(GL_FRONT) - depth_buf = glReadPixels( - 0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT - ) - - depth_im = np.frombuffer(depth_buf, dtype=np.float32) - depth_im = depth_im.reshape((height, width)) - depth_im = np.flip(depth_im, axis=0) - - inf_inds = (depth_im == 1.0) - depth_im = 2.0 * depth_im - 1.0 - z_near, z_far = self._latest_znear, self._latest_zfar - noninf = np.logical_not(inf_inds) - if z_far is None: - depth_im[noninf] = 2 * z_near / (1.0 - depth_im[noninf]) - else: - depth_im[noninf] = ((2.0 * z_near * z_far) / - (z_far + z_near - depth_im[noninf] * - (z_far - z_near))) - depth_im[inf_inds] = 0.0 - - # Resize for macos if needed - if sys.platform == 'darwin': - depth_im = self._resize_image(depth_im) - - return depth_im - - def delete(self): - """Free all allocated OpenGL resources. - """ - # Free shaders - self._program_cache.clear() - - # Free fonts - self._font_cache.clear() - - # Free meshes - for mesh in self._meshes: - for p in mesh.primitives: - p.delete() - - # Free textures - for mesh_texture in self._mesh_textures: - mesh_texture.delete() - - for shadow_texture in self._shadow_textures: - shadow_texture.delete() - - self._meshes = set() - self._mesh_textures = set() - self._shadow_textures = set() - self._texture_alloc_idx = 0 - - self._delete_main_framebuffer() - self._delete_shadow_framebuffer() - - def __del__(self): - try: - self.delete() - except Exception: - pass - - ########################################################################### - # Rendering passes - ########################################################################### - - def _forward_pass(self, scene, flags, seg_node_map=None): - # Set up viewport for render - self._configure_forward_pass_viewport(flags) - - # Clear it - if bool(flags & RenderFlags.SEG): - glClearColor(0.0, 0.0, 0.0, 1.0) - if seg_node_map is None: - seg_node_map = {} - else: - glClearColor(*scene.bg_color) - - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) - - if not bool(flags & RenderFlags.SEG): - glEnable(GL_MULTISAMPLE) - else: - glDisable(GL_MULTISAMPLE) - - # Set up camera matrices - V, P = self._get_camera_matrices(scene) - - program = None - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - # If SEG, set color - if bool(flags & RenderFlags.SEG): - if node not in seg_node_map: - continue - color = seg_node_map[node] - if not isinstance(color, (list, tuple, np.ndarray)): - color = np.repeat(color, 3) - else: - color = np.asanyarray(color) - color = color / 255.0 - - for primitive in mesh.primitives: - - # First, get and bind the 
appropriate program - program = self._get_primitive_program( - primitive, flags, ProgramFlags.USE_MATERIAL - ) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform( - 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3] - ) - if bool(flags & RenderFlags.SEG): - program.set_uniform('color', color) - - # Next, bind the lighting - if not (flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.FLAT or - flags & RenderFlags.SEG): - self._bind_lighting(scene, program, node, flags) - - # Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=flags - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - program._unbind() - glFlush() - - # If doing offscreen render, copy result from framebuffer and return - if flags & RenderFlags.OFFSCREEN: - return self._read_main_framebuffer(scene, flags) - else: - return - - def _shadow_mapping_pass(self, scene, light_node, flags): - light = light_node.light - - # Set up viewport for render - self._configure_shadow_mapping_viewport(light, flags) - - # Set up camera matrices - V, P = self._get_light_cam_matrices(scene, light_node, flags) - - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - for primitive in mesh.primitives: - - # First, get and bind the appropriate program - program = self._get_primitive_program( - primitive, flags, ProgramFlags.NONE - ) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform( - 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3] - ) - - # Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=RenderFlags.DEPTH_ONLY - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - program._unbind() - glFlush() - - def _normals_pass(self, scene, flags): - # Set up viewport for render - self._configure_forward_pass_viewport(flags) - program = None - - # Set up camera matrices - V, P = self._get_camera_matrices(scene) - - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - for primitive in mesh.primitives: - - # Skip objects that don't have normals - if not primitive.buf_flags & BufFlags.NORMAL: - continue - - # First, get and bind the appropriate program - pf = ProgramFlags.NONE - if flags & RenderFlags.VERTEX_NORMALS: - pf = pf | ProgramFlags.VERTEX_NORMALS - if flags & RenderFlags.FACE_NORMALS: - pf = pf | ProgramFlags.FACE_NORMALS - program = self._get_primitive_program(primitive, flags, pf) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform('normal_magnitude', 0.05 * primitive.scale) - program.set_uniform( - 'normal_color', np.array([0.1, 0.1, 1.0, 1.0]) - ) - - # Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=RenderFlags.DEPTH_ONLY - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - 
program._unbind() - glFlush() - - ########################################################################### - # Handlers for binding uniforms and drawing primitives - ########################################################################### - - def _bind_and_draw_primitive(self, primitive, pose, program, flags): - # Set model pose matrix - program.set_uniform('M', pose) - - # Bind mesh buffers - primitive._bind() - - # Bind mesh material - if not (flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.SEG): - material = primitive.material - - # Bind textures - tf = material.tex_flags - if tf & TexFlags.NORMAL: - self._bind_texture(material.normalTexture, - 'material.normal_texture', program) - if tf & TexFlags.OCCLUSION: - self._bind_texture(material.occlusionTexture, - 'material.occlusion_texture', program) - if tf & TexFlags.EMISSIVE: - self._bind_texture(material.emissiveTexture, - 'material.emissive_texture', program) - if tf & TexFlags.BASE_COLOR: - self._bind_texture(material.baseColorTexture, - 'material.base_color_texture', program) - if tf & TexFlags.METALLIC_ROUGHNESS: - self._bind_texture(material.metallicRoughnessTexture, - 'material.metallic_roughness_texture', - program) - if tf & TexFlags.DIFFUSE: - self._bind_texture(material.diffuseTexture, - 'material.diffuse_texture', program) - if tf & TexFlags.SPECULAR_GLOSSINESS: - self._bind_texture(material.specularGlossinessTexture, - 'material.specular_glossiness_texture', - program) - - # Bind other uniforms - b = 'material.{}' - program.set_uniform(b.format('emissive_factor'), - material.emissiveFactor) - if isinstance(material, MetallicRoughnessMaterial): - program.set_uniform(b.format('base_color_factor'), - material.baseColorFactor) - program.set_uniform(b.format('metallic_factor'), - material.metallicFactor) - program.set_uniform(b.format('roughness_factor'), - material.roughnessFactor) - elif isinstance(material, SpecularGlossinessMaterial): - program.set_uniform(b.format('diffuse_factor'), - material.diffuseFactor) - program.set_uniform(b.format('specular_factor'), - material.specularFactor) - program.set_uniform(b.format('glossiness_factor'), - material.glossinessFactor) - - # Set blending options - if material.alphaMode == 'BLEND': - glEnable(GL_BLEND) - glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) - else: - glEnable(GL_BLEND) - glBlendFunc(GL_ONE, GL_ZERO) - - # Set wireframe mode - wf = material.wireframe - if flags & RenderFlags.FLIP_WIREFRAME: - wf = not wf - if (flags & RenderFlags.ALL_WIREFRAME) or wf: - glPolygonMode(GL_FRONT_AND_BACK, GL_LINE) - else: - glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) - - # Set culling mode - if material.doubleSided or flags & RenderFlags.SKIP_CULL_FACES: - glDisable(GL_CULL_FACE) - else: - glEnable(GL_CULL_FACE) - glCullFace(GL_BACK) - else: - glEnable(GL_CULL_FACE) - glEnable(GL_BLEND) - glCullFace(GL_BACK) - glBlendFunc(GL_ONE, GL_ZERO) - glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) - - # Set point size if needed - glDisable(GL_PROGRAM_POINT_SIZE) - if primitive.mode == GLTF.POINTS: - glEnable(GL_PROGRAM_POINT_SIZE) - glPointSize(self.point_size) - - # Render mesh - n_instances = 1 - if primitive.poses is not None: - n_instances = len(primitive.poses) - - if primitive.indices is not None: - glDrawElementsInstanced( - primitive.mode, primitive.indices.size, GL_UNSIGNED_INT, - ctypes.c_void_p(0), n_instances - ) - else: - glDrawArraysInstanced( - primitive.mode, 0, len(primitive.positions), n_instances - ) - - # Unbind mesh buffers - primitive._unbind() - - def 
_bind_lighting(self, scene, program, node, flags): - """Bind all lighting uniform values for a scene. - """ - max_n_lights = self._compute_max_n_lights(flags) - - n_d = min(len(scene.directional_light_nodes), max_n_lights[0]) - n_s = min(len(scene.spot_light_nodes), max_n_lights[1]) - n_p = min(len(scene.point_light_nodes), max_n_lights[2]) - program.set_uniform('ambient_light', scene.ambient_light) - program.set_uniform('n_directional_lights', n_d) - program.set_uniform('n_spot_lights', n_s) - program.set_uniform('n_point_lights', n_p) - plc = 0 - slc = 0 - dlc = 0 - - light_nodes = scene.light_nodes - if (len(scene.directional_light_nodes) > max_n_lights[0] or - len(scene.spot_light_nodes) > max_n_lights[1] or - len(scene.point_light_nodes) > max_n_lights[2]): - light_nodes = self._sorted_nodes_by_distance( - scene, scene.light_nodes, node - ) - - for n in light_nodes: - light = n.light - pose = scene.get_pose(n) - position = pose[:3,3] - direction = -pose[:3,2] - - if isinstance(light, PointLight): - if plc == max_n_lights[2]: - continue - b = 'point_lights[{}].'.format(plc) - plc += 1 - shadow = bool(flags & RenderFlags.SHADOWS_POINT) - program.set_uniform(b + 'position', position) - elif isinstance(light, SpotLight): - if slc == max_n_lights[1]: - continue - b = 'spot_lights[{}].'.format(slc) - slc += 1 - shadow = bool(flags & RenderFlags.SHADOWS_SPOT) - las = 1.0 / max(0.001, np.cos(light.innerConeAngle) - - np.cos(light.outerConeAngle)) - lao = -np.cos(light.outerConeAngle) * las - program.set_uniform(b + 'direction', direction) - program.set_uniform(b + 'position', position) - program.set_uniform(b + 'light_angle_scale', las) - program.set_uniform(b + 'light_angle_offset', lao) - else: - if dlc == max_n_lights[0]: - continue - b = 'directional_lights[{}].'.format(dlc) - dlc += 1 - shadow = bool(flags & RenderFlags.SHADOWS_DIRECTIONAL) - program.set_uniform(b + 'direction', direction) - - program.set_uniform(b + 'color', light.color) - program.set_uniform(b + 'intensity', light.intensity) - # if light.range is not None: - # program.set_uniform(b + 'range', light.range) - # else: - # program.set_uniform(b + 'range', 0) - - if shadow: - self._bind_texture(light.shadow_texture, - b + 'shadow_map', program) - if not isinstance(light, PointLight): - V, P = self._get_light_cam_matrices(scene, n, flags) - program.set_uniform(b + 'light_matrix', P.dot(V)) - else: - raise NotImplementedError( - 'Point light shadows not implemented' - ) - - def _sorted_mesh_nodes(self, scene): - cam_loc = scene.get_pose(scene.main_camera_node)[:3,3] - solid_nodes = [] - trans_nodes = [] - for node in scene.mesh_nodes: - mesh = node.mesh - if mesh.is_transparent: - trans_nodes.append(node) - else: - solid_nodes.append(node) - - # TODO BETTER SORTING METHOD - trans_nodes.sort( - key=lambda n: -np.linalg.norm(scene.get_pose(n)[:3,3] - cam_loc) - ) - solid_nodes.sort( - key=lambda n: -np.linalg.norm(scene.get_pose(n)[:3,3] - cam_loc) - ) - - return solid_nodes + trans_nodes - - def _sorted_nodes_by_distance(self, scene, nodes, compare_node): - nodes = list(nodes) - compare_posn = scene.get_pose(compare_node)[:3,3] - nodes.sort(key=lambda n: np.linalg.norm( - scene.get_pose(n)[:3,3] - compare_posn) - ) - return nodes - - ########################################################################### - # Context Management - ########################################################################### - - def _update_context(self, scene, flags): - - # Update meshes - scene_meshes = scene.meshes - - # Add new meshes to 
context - for mesh in scene_meshes - self._meshes: - for p in mesh.primitives: - p._add_to_context() - - # Remove old meshes from context - for mesh in self._meshes - scene_meshes: - for p in mesh.primitives: - p.delete() - - self._meshes = scene_meshes.copy() - - # Update mesh textures - mesh_textures = set() - for m in scene_meshes: - for p in m.primitives: - mesh_textures |= p.material.textures - - # Add new textures to context - for texture in mesh_textures - self._mesh_textures: - texture._add_to_context() - - # Remove old textures from context - for texture in self._mesh_textures - mesh_textures: - texture.delete() - - self._mesh_textures = mesh_textures.copy() - - shadow_textures = set() - for l in scene.lights: - # Create if needed - active = False - if (isinstance(l, DirectionalLight) and - flags & RenderFlags.SHADOWS_DIRECTIONAL): - active = True - elif (isinstance(l, PointLight) and - flags & RenderFlags.SHADOWS_POINT): - active = True - elif isinstance(l, SpotLight) and flags & RenderFlags.SHADOWS_SPOT: - active = True - - if active and l.shadow_texture is None: - l._generate_shadow_texture() - if l.shadow_texture is not None: - shadow_textures.add(l.shadow_texture) - - # Add new textures to context - for texture in shadow_textures - self._shadow_textures: - texture._add_to_context() - - # Remove old textures from context - for texture in self._shadow_textures - shadow_textures: - texture.delete() - - self._shadow_textures = shadow_textures.copy() - - ########################################################################### - # Texture Management - ########################################################################### - - def _bind_texture(self, texture, uniform_name, program): - """Bind a texture to an active texture unit and return - the texture unit index that was used. 
- """ - tex_id = self._get_next_active_texture() - glActiveTexture(GL_TEXTURE0 + tex_id) - texture._bind() - program.set_uniform(uniform_name, tex_id) - - def _get_next_active_texture(self): - val = self._texture_alloc_idx - self._texture_alloc_idx += 1 - return val - - def _reset_active_textures(self): - self._texture_alloc_idx = 0 - - ########################################################################### - # Camera Matrix Management - ########################################################################### - - def _get_camera_matrices(self, scene): - main_camera_node = scene.main_camera_node - if main_camera_node is None: - raise ValueError('Cannot render scene without a camera') - P = main_camera_node.camera.get_projection_matrix( - width=self.viewport_width, height=self.viewport_height - ) - pose = scene.get_pose(main_camera_node) - V = np.linalg.inv(pose) # V maps from world to camera - return V, P - - def _get_light_cam_matrices(self, scene, light_node, flags): - light = light_node.light - pose = scene.get_pose(light_node).copy() - s = scene.scale - camera = light._get_shadow_camera(s) - P = camera.get_projection_matrix() - if isinstance(light, DirectionalLight): - direction = -pose[:3,2] - c = scene.centroid - loc = c - direction * s - pose[:3,3] = loc - V = np.linalg.inv(pose) # V maps from world to camera - return V, P - - ########################################################################### - # Shader Program Management - ########################################################################### - - def _get_text_program(self): - program = self._program_cache.get_program( - vertex_shader='text.vert', - fragment_shader='text.frag' - ) - - if not program._in_context(): - program._add_to_context() - - return program - - def _compute_max_n_lights(self, flags): - max_n_lights = [MAX_N_LIGHTS, MAX_N_LIGHTS, MAX_N_LIGHTS] - n_tex_units = glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS) - - # Reserved texture units: 6 - # Normal Map - # Occlusion Map - # Emissive Map - # Base Color or Diffuse Map - # MR or SG Map - # Environment cubemap - - n_reserved_textures = 6 - n_available_textures = n_tex_units - n_reserved_textures - - # Distribute textures evenly among lights with shadows, with - # a preference for directional lights - n_shadow_types = 0 - if flags & RenderFlags.SHADOWS_DIRECTIONAL: - n_shadow_types += 1 - if flags & RenderFlags.SHADOWS_SPOT: - n_shadow_types += 1 - if flags & RenderFlags.SHADOWS_POINT: - n_shadow_types += 1 - - if n_shadow_types > 0: - tex_per_light = n_available_textures // n_shadow_types - - if flags & RenderFlags.SHADOWS_DIRECTIONAL: - max_n_lights[0] = ( - tex_per_light + - (n_available_textures - tex_per_light * n_shadow_types) - ) - if flags & RenderFlags.SHADOWS_SPOT: - max_n_lights[1] = tex_per_light - if flags & RenderFlags.SHADOWS_POINT: - max_n_lights[2] = tex_per_light - - return max_n_lights - - def _get_primitive_program(self, primitive, flags, program_flags): - vertex_shader = None - fragment_shader = None - geometry_shader = None - defines = {} - - if (bool(program_flags & ProgramFlags.USE_MATERIAL) and - not flags & RenderFlags.DEPTH_ONLY and - not flags & RenderFlags.FLAT and - not flags & RenderFlags.SEG): - vertex_shader = 'mesh.vert' - fragment_shader = 'mesh.frag' - elif bool(program_flags & (ProgramFlags.VERTEX_NORMALS | - ProgramFlags.FACE_NORMALS)): - vertex_shader = 'vertex_normals.vert' - if primitive.mode == GLTF.POINTS: - geometry_shader = 'vertex_normals_pc.geom' - else: - geometry_shader = 'vertex_normals.geom' - 
fragment_shader = 'vertex_normals.frag' - elif flags & RenderFlags.FLAT: - vertex_shader = 'flat.vert' - fragment_shader = 'flat.frag' - elif flags & RenderFlags.SEG: - vertex_shader = 'segmentation.vert' - fragment_shader = 'segmentation.frag' - else: - vertex_shader = 'mesh_depth.vert' - fragment_shader = 'mesh_depth.frag' - - # Set up vertex buffer DEFINES - bf = primitive.buf_flags - buf_idx = 1 - if bf & BufFlags.NORMAL: - defines['NORMAL_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.TANGENT: - defines['TANGENT_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.TEXCOORD_0: - defines['TEXCOORD_0_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.TEXCOORD_1: - defines['TEXCOORD_1_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.COLOR_0: - defines['COLOR_0_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.JOINTS_0: - defines['JOINTS_0_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.WEIGHTS_0: - defines['WEIGHTS_0_LOC'] = buf_idx - buf_idx += 1 - defines['INST_M_LOC'] = buf_idx - - # Set up shadow mapping defines - if flags & RenderFlags.SHADOWS_DIRECTIONAL: - defines['DIRECTIONAL_LIGHT_SHADOWS'] = 1 - if flags & RenderFlags.SHADOWS_SPOT: - defines['SPOT_LIGHT_SHADOWS'] = 1 - if flags & RenderFlags.SHADOWS_POINT: - defines['POINT_LIGHT_SHADOWS'] = 1 - max_n_lights = self._compute_max_n_lights(flags) - defines['MAX_DIRECTIONAL_LIGHTS'] = max_n_lights[0] - defines['MAX_SPOT_LIGHTS'] = max_n_lights[1] - defines['MAX_POINT_LIGHTS'] = max_n_lights[2] - - # Set up vertex normal defines - if program_flags & ProgramFlags.VERTEX_NORMALS: - defines['VERTEX_NORMALS'] = 1 - if program_flags & ProgramFlags.FACE_NORMALS: - defines['FACE_NORMALS'] = 1 - - # Set up material texture defines - if bool(program_flags & ProgramFlags.USE_MATERIAL): - tf = primitive.material.tex_flags - if tf & TexFlags.NORMAL: - defines['HAS_NORMAL_TEX'] = 1 - if tf & TexFlags.OCCLUSION: - defines['HAS_OCCLUSION_TEX'] = 1 - if tf & TexFlags.EMISSIVE: - defines['HAS_EMISSIVE_TEX'] = 1 - if tf & TexFlags.BASE_COLOR: - defines['HAS_BASE_COLOR_TEX'] = 1 - if tf & TexFlags.METALLIC_ROUGHNESS: - defines['HAS_METALLIC_ROUGHNESS_TEX'] = 1 - if tf & TexFlags.DIFFUSE: - defines['HAS_DIFFUSE_TEX'] = 1 - if tf & TexFlags.SPECULAR_GLOSSINESS: - defines['HAS_SPECULAR_GLOSSINESS_TEX'] = 1 - if isinstance(primitive.material, MetallicRoughnessMaterial): - defines['USE_METALLIC_MATERIAL'] = 1 - elif isinstance(primitive.material, SpecularGlossinessMaterial): - defines['USE_GLOSSY_MATERIAL'] = 1 - - program = self._program_cache.get_program( - vertex_shader=vertex_shader, - fragment_shader=fragment_shader, - geometry_shader=geometry_shader, - defines=defines - ) - - if not program._in_context(): - program._add_to_context() - - return program - - ########################################################################### - # Viewport Management - ########################################################################### - - def _configure_forward_pass_viewport(self, flags): - - # If using offscreen render, bind main framebuffer - if flags & RenderFlags.OFFSCREEN: - self._configure_main_framebuffer() - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb_ms) - else: - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0) - - glViewport(0, 0, self.viewport_width, self.viewport_height) - glEnable(GL_DEPTH_TEST) - glDepthMask(GL_TRUE) - glDepthFunc(GL_LESS) - glDepthRange(0.0, 1.0) - - def _configure_shadow_mapping_viewport(self, light, flags): - self._configure_shadow_framebuffer() - glBindFramebuffer(GL_FRAMEBUFFER, self._shadow_fb) - 
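# Attach the light's shadow map as the framebuffer's depth attachment and disable color reads/writes, since only depth is rendered here. -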
light.shadow_texture._bind() - light.shadow_texture._bind_as_depth_attachment() - glActiveTexture(GL_TEXTURE0) - light.shadow_texture._bind() - glDrawBuffer(GL_NONE) - glReadBuffer(GL_NONE) - - glClear(GL_DEPTH_BUFFER_BIT) - glViewport(0, 0, SHADOW_TEX_SZ, SHADOW_TEX_SZ) - glEnable(GL_DEPTH_TEST) - glDepthMask(GL_TRUE) - glDepthFunc(GL_LESS) - glDepthRange(0.0, 1.0) - glDisable(GL_CULL_FACE) - glDisable(GL_BLEND) - - ########################################################################### - # Framebuffer Management - ########################################################################### - - def _configure_shadow_framebuffer(self): - if self._shadow_fb is None: - self._shadow_fb = glGenFramebuffers(1) - - def _delete_shadow_framebuffer(self): - if self._shadow_fb is not None: - glDeleteFramebuffers(1, [self._shadow_fb]) - - def _configure_main_framebuffer(self): - # If mismatch with prior framebuffer, delete it - if (self._main_fb is not None and - self.viewport_width != self._main_fb_dims[0] or - self.viewport_height != self._main_fb_dims[1]): - self._delete_main_framebuffer() - - # If framebuffer doesn't exist, create it - if self._main_fb is None: - # Generate standard buffer - self._main_cb, self._main_db = glGenRenderbuffers(2) - - glBindRenderbuffer(GL_RENDERBUFFER, self._main_cb) - glRenderbufferStorage( - GL_RENDERBUFFER, GL_RGBA, - self.viewport_width, self.viewport_height - ) - - glBindRenderbuffer(GL_RENDERBUFFER, self._main_db) - glRenderbufferStorage( - GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, - self.viewport_width, self.viewport_height - ) - - self._main_fb = glGenFramebuffers(1) - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb) - glFramebufferRenderbuffer( - GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, - GL_RENDERBUFFER, self._main_cb - ) - glFramebufferRenderbuffer( - GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, - GL_RENDERBUFFER, self._main_db - ) - - # Generate multisample buffer - self._main_cb_ms, self._main_db_ms = glGenRenderbuffers(2) - glBindRenderbuffer(GL_RENDERBUFFER, self._main_cb_ms) - # glRenderbufferStorageMultisample( - # GL_RENDERBUFFER, 4, GL_RGBA, - # self.viewport_width, self.viewport_height - # ) - # glBindRenderbuffer(GL_RENDERBUFFER, self._main_db_ms) - # glRenderbufferStorageMultisample( - # GL_RENDERBUFFER, 4, GL_DEPTH_COMPONENT24, - # self.viewport_width, self.viewport_height - # ) - # Added this line: choose a supported multisample count - num_samples = min(glGetIntegerv(GL_MAX_SAMPLES), 4) # No more than GL_MAX_SAMPLES - - # Same as the original call above, but with 4 replaced by num_samples - glRenderbufferStorageMultisample(GL_RENDERBUFFER, num_samples, GL_RGBA, self.viewport_width, self.viewport_height) - - glBindRenderbuffer(GL_RENDERBUFFER, self._main_db_ms) # This line is unchanged - - # This line also replaces 4 with num_samples - glRenderbufferStorageMultisample(GL_RENDERBUFFER, num_samples, GL_DEPTH_COMPONENT24, self.viewport_width, self.viewport_height) - - self._main_fb_ms = glGenFramebuffers(1) - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb_ms) - glFramebufferRenderbuffer( - GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, - GL_RENDERBUFFER, self._main_cb_ms - ) - glFramebufferRenderbuffer( - GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, - GL_RENDERBUFFER, self._main_db_ms - ) - - self._main_fb_dims = (self.viewport_width, self.viewport_height) - - def _delete_main_framebuffer(self): - if self._main_fb is not None: - glDeleteFramebuffers(2, [self._main_fb, self._main_fb_ms]) - if self._main_cb is not None: - glDeleteRenderbuffers(2, [self._main_cb, self._main_cb_ms]) - if self._main_db is not None: - glDeleteRenderbuffers(2, [self._main_db, 
self._main_db_ms]) - - self._main_fb = None - self._main_cb = None - self._main_db = None - self._main_fb_ms = None - self._main_cb_ms = None - self._main_db_ms = None - self._main_fb_dims = (None, None) - - def _read_main_framebuffer(self, scene, flags): - width, height = self._main_fb_dims[0], self._main_fb_dims[1] - - # Bind framebuffer and blit buffers - glBindFramebuffer(GL_READ_FRAMEBUFFER, self._main_fb_ms) - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb) - glBlitFramebuffer( - 0, 0, width, height, 0, 0, width, height, - GL_COLOR_BUFFER_BIT, GL_LINEAR - ) - glBlitFramebuffer( - 0, 0, width, height, 0, 0, width, height, - GL_DEPTH_BUFFER_BIT, GL_NEAREST - ) - glBindFramebuffer(GL_READ_FRAMEBUFFER, self._main_fb) - - # Read depth - depth_buf = glReadPixels( - 0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT - ) - depth_im = np.frombuffer(depth_buf, dtype=np.float32) - depth_im = depth_im.reshape((height, width)) - depth_im = np.flip(depth_im, axis=0) - inf_inds = (depth_im == 1.0) - depth_im = 2.0 * depth_im - 1.0 - z_near = scene.main_camera_node.camera.znear - z_far = scene.main_camera_node.camera.zfar - noninf = np.logical_not(inf_inds) - if z_far is None: - depth_im[noninf] = 2 * z_near / (1.0 - depth_im[noninf]) - else: - depth_im[noninf] = ((2.0 * z_near * z_far) / - (z_far + z_near - depth_im[noninf] * - (z_far - z_near))) - depth_im[inf_inds] = 0.0 - - # Resize for macos if needed - if sys.platform == 'darwin': - depth_im = self._resize_image(depth_im) - - if flags & RenderFlags.DEPTH_ONLY: - return depth_im - - # Read color - if flags & RenderFlags.RGBA: - color_buf = glReadPixels( - 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE - ) - color_im = np.frombuffer(color_buf, dtype=np.uint8) - color_im = color_im.reshape((height, width, 4)) - else: - color_buf = glReadPixels( - 0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE - ) - color_im = np.frombuffer(color_buf, dtype=np.uint8) - color_im = color_im.reshape((height, width, 3)) - color_im = np.flip(color_im, axis=0) - - # Resize for macos if needed - if sys.platform == 'darwin': - color_im = self._resize_image(color_im, True) - - return color_im, depth_im - - def _resize_image(self, value, antialias=False): - """If needed, rescale the render for MacOS.""" - img = PIL.Image.fromarray(value) - resample = PIL.Image.NEAREST - if antialias: - resample = PIL.Image.BILINEAR - size = (self.viewport_width // self.dpscale, - self.viewport_height // self.dpscale) - img = img.resize(size, resample=resample) - return np.array(img) - - ########################################################################### - # Shadowmap Debugging - ########################################################################### - - def _forward_pass_no_reset(self, scene, flags): - # Set up camera matrices - V, P = self._get_camera_matrices(scene) - - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - for primitive in mesh.primitives: - - # First, get and bind the appropriate program - program = self._get_primitive_program( - primitive, flags, ProgramFlags.USE_MATERIAL - ) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform( - 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3] - ) - - # Next, bind the lighting - if not flags & RenderFlags.DEPTH_ONLY and not flags & RenderFlags.FLAT: - self._bind_lighting(scene, program, node, flags) - - # 
Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=flags - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - program._unbind() - glFlush() - - def _render_light_shadowmaps(self, scene, light_nodes, flags, tile=False): - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0) - glClearColor(*scene.bg_color) - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) - glEnable(GL_DEPTH_TEST) - glDepthMask(GL_TRUE) - glDepthFunc(GL_LESS) - glDepthRange(0.0, 1.0) - - w = self.viewport_width - h = self.viewport_height - - num_nodes = len(light_nodes) - viewport_dims = { - (0, 2): [0, h // 2, w // 2, h], - (1, 2): [w // 2, h // 2, w, h], - (0, 3): [0, h // 2, w // 2, h], - (1, 3): [w // 2, h // 2, w, h], - (2, 3): [0, 0, w // 2, h // 2], - (0, 4): [0, h // 2, w // 2, h], - (1, 4): [w // 2, h // 2, w, h], - (2, 4): [0, 0, w // 2, h // 2], - (3, 4): [w // 2, 0, w, h // 2] - } - - if tile: - for i, ln in enumerate(light_nodes): - light = ln.light - - if light.shadow_texture is None: - raise ValueError('Light does not have a shadow texture') - - glViewport(*viewport_dims[(i, num_nodes + 1)]) - - program = self._get_debug_quad_program() - program._bind() - self._bind_texture(light.shadow_texture, 'depthMap', program) - self._render_debug_quad() - self._reset_active_textures() - glFlush() - i += 1 - glViewport(*viewport_dims[(i, num_nodes + 1)]) - self._forward_pass_no_reset(scene, flags) - else: - for i, ln in enumerate(light_nodes): - light = ln.light - - if light.shadow_texture is None: - raise ValueError('Light does not have a shadow texture') - - glViewport(0, 0, self.viewport_width, self.viewport_height) - - program = self._get_debug_quad_program() - program._bind() - self._bind_texture(light.shadow_texture, 'depthMap', program) - self._render_debug_quad() - self._reset_active_textures() - glFlush() - return - - def _get_debug_quad_program(self): - program = self._program_cache.get_program( - vertex_shader='debug_quad.vert', - fragment_shader='debug_quad.frag' - ) - if not program._in_context(): - program._add_to_context() - return program - - def _render_debug_quad(self): - x = glGenVertexArrays(1) - glBindVertexArray(x) - glDrawArrays(GL_TRIANGLES, 0, 6) - glBindVertexArray(0) - glDeleteVertexArrays(1, [x]) diff --git a/spaces/OptimalScale/Robin-7b/lmflow/models/auto_model.py b/spaces/OptimalScale/Robin-7b/lmflow/models/auto_model.py deleted file mode 100644 index dab6c150c73b69176ca7fa9635fdb9ef84b275cc..0000000000000000000000000000000000000000 --- a/spaces/OptimalScale/Robin-7b/lmflow/models/auto_model.py +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -"""Automatically get correct model type. 
-""" - -from lmflow.models.hf_decoder_model import HFDecoderModel -from lmflow.models.text_regression_model import TextRegressionModel -from lmflow.models.hf_encoder_decoder_model import HFEncoderDecoderModel - -class AutoModel: - - @classmethod - def get_model(self, model_args, *args, **kwargs): - arch_type = model_args.arch_type - if arch_type == "decoder_only": - return HFDecoderModel(model_args, *args, **kwargs) - elif arch_type == "text_regression": - return TextRegressionModel(model_args, *args, **kwargs) - elif arch_type == "encoder_decoder": - return HFEncoderDecoderModel(model_args, *args, **kwargs) - else: - raise NotImplementedError( - f"model architecture type \"{arch_type}\" is not supported" - ) diff --git a/spaces/Oumar199/Fake-Real-Face-Detection/fake_face_detection/data/lion_cheetah_dataset.py b/spaces/Oumar199/Fake-Real-Face-Detection/fake_face_detection/data/lion_cheetah_dataset.py deleted file mode 100644 index a915009dac9ecf89fd6aa4aa0ca27b8769133ea7..0000000000000000000000000000000000000000 --- a/spaces/Oumar199/Fake-Real-Face-Detection/fake_face_detection/data/lion_cheetah_dataset.py +++ /dev/null @@ -1,102 +0,0 @@ - -from fake_face_detection.utils.compute_weights import compute_weights -from torch.utils.data import Dataset -from PIL import Image -from glob import glob -import numpy as np -import torch -import os - -class LionCheetahDataset(Dataset): - - def __init__(self, lion_path: str, cheetah_path: str, id_map: dict, transformer, **transformer_kwargs): - - # let us recuperate the transformer - self.transformer = transformer - - # let us recuperate the transformer kwargs - self.transformer_kwargs = transformer_kwargs - - # let us load the images - lion_images = glob(os.path.join(lion_path, "*")) - - cheetah_images = glob(os.path.join(cheetah_path, "*")) - - # recuperate rgb images - self.lion_images = [] - - self.cheetah_images = [] - - for lion in lion_images: - - try: - - with Image.open(lion) as img: - - # let us add a transformation on the images - if self.transformer: - - image = self.transformer(img, **self.transformer_kwargs) - - self.lion_images.append(lion) - - except Exception as e: - - pass - - for cheetah in cheetah_images: - - try: - - with Image.open(cheetah) as img: - - # let us add a transformation on the images - if self.transformer: - - image = self.transformer(img, **self.transformer_kwargs) - - self.cheetah_images.append(cheetah) - - except Exception as e: - - pass - - self.images = self.lion_images + self.cheetah_images - - # let us recuperate the labels - self.lion_labels = [int(id_map['lion'])] * len(self.lion_images) - - self.cheetah_labels = [int(id_map['cheetah'])] * len(self.cheetah_images) - - self.labels = self.lion_labels + self.cheetah_labels - - # let us recuperate the weights - self.weights = torch.from_numpy(compute_weights(self.labels)) - - # let us recuperate the length - self.length = len(self.labels) - - def __getitem__(self, index): - - # let us recuperate an image - image = self.images[index] - - with Image.open(image) as img: - - # let us recuperate a label - label = self.labels[index] - - # let us add a transformation on the images - if self.transformer: - - image = self.transformer(img, **self.transformer_kwargs) - - # let us add the label inside the obtained dictionary - image['labels'] = label - - return image - - def __len__(self): - - return self.length - diff --git a/spaces/PAIR/PAIR-Diffusion/ldm/modules/encoders/modules.py b/spaces/PAIR/PAIR-Diffusion/ldm/modules/encoders/modules.py deleted file mode 100644 index 
4edd5496b9e668ea72a5be39db9cca94b6a42f9b..0000000000000000000000000000000000000000 --- a/spaces/PAIR/PAIR-Diffusion/ldm/modules/encoders/modules.py +++ /dev/null @@ -1,213 +0,0 @@ -import torch -import torch.nn as nn -from torch.utils.checkpoint import checkpoint - -from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel - -import open_clip -from ldm.util import default, count_params - - -class AbstractEncoder(nn.Module): - def __init__(self): - super().__init__() - - def encode(self, *args, **kwargs): - raise NotImplementedError - - -class IdentityEncoder(AbstractEncoder): - - def encode(self, x): - return x - - -class ClassEmbedder(nn.Module): - def __init__(self, embed_dim, n_classes=1000, key='class', ucg_rate=0.1): - super().__init__() - self.key = key - self.embedding = nn.Embedding(n_classes, embed_dim) - self.n_classes = n_classes - self.ucg_rate = ucg_rate - - def forward(self, batch, key=None, disable_dropout=False): - if key is None: - key = self.key - # this is for use in crossattn - c = batch[key][:, None] - if self.ucg_rate > 0. and not disable_dropout: - mask = 1. - torch.bernoulli(torch.ones_like(c) * self.ucg_rate) - c = mask * c + (1-mask) * torch.ones_like(c)*(self.n_classes-1) - c = c.long() - c = self.embedding(c) - return c - - def get_unconditional_conditioning(self, bs, device="cuda"): - uc_class = self.n_classes - 1 # 1000 classes --> 0 ... 999, one extra class for ucg (class 1000) - uc = torch.ones((bs,), device=device) * uc_class - uc = {self.key: uc} - return uc - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -class FrozenT5Embedder(AbstractEncoder): - """Uses the T5 transformer encoder for text""" - def __init__(self, version="google/t5-v1_1-large", device="cuda", max_length=77, freeze=True): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl - super().__init__() - self.tokenizer = T5Tokenizer.from_pretrained(version) - self.transformer = T5EncoderModel.from_pretrained(version) - self.device = device - self.max_length = max_length # TODO: typical value? 
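- # Optionally freeze the encoder so its parameters receive no gradient updates.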
- if freeze: - self.freeze() - - def freeze(self): - self.transformer = self.transformer.eval() - #self.train = disabled_train - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"].to(self.device) - outputs = self.transformer(input_ids=tokens) - - z = outputs.last_hidden_state - return z - - def encode(self, text): - return self(text) - - -class FrozenCLIPEmbedder(AbstractEncoder): - """Uses the CLIP transformer encoder for text (from huggingface)""" - LAYERS = [ - "last", - "pooled", - "hidden" - ] - def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77, - freeze=True, layer="last", layer_idx=None): # clip-vit-base-patch32 - super().__init__() - assert layer in self.LAYERS - self.tokenizer = CLIPTokenizer.from_pretrained(version) - self.transformer = CLIPTextModel.from_pretrained(version) - self.device = device - self.max_length = max_length - if freeze: - self.freeze() - self.layer = layer - self.layer_idx = layer_idx - if layer == "hidden": - assert layer_idx is not None - assert 0 <= abs(layer_idx) <= 12 - - def freeze(self): - self.transformer = self.transformer.eval() - #self.train = disabled_train - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"].to(self.device) - outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer=="hidden") - if self.layer == "last": - z = outputs.last_hidden_state - elif self.layer == "pooled": - z = outputs.pooler_output[:, None, :] - else: - z = outputs.hidden_states[self.layer_idx] - return z - - def encode(self, text): - return self(text) - - -class FrozenOpenCLIPEmbedder(AbstractEncoder): - """ - Uses the OpenCLIP transformer encoder for text - """ - LAYERS = [ - #"pooled", - "last", - "penultimate" - ] - def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device="cuda", max_length=77, - freeze=True, layer="last"): - super().__init__() - assert layer in self.LAYERS - model, _, _ = open_clip.create_model_and_transforms(arch, device=torch.device('cpu'), pretrained=version) - del model.visual - self.model = model - - self.device = device - self.max_length = max_length - if freeze: - self.freeze() - self.layer = layer - if self.layer == "last": - self.layer_idx = 0 - elif self.layer == "penultimate": - self.layer_idx = 1 - else: - raise NotImplementedError() - - def freeze(self): - self.model = self.model.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - tokens = open_clip.tokenize(text) - z = self.encode_with_transformer(tokens.to(self.device)) - return z - - def encode_with_transformer(self, text): - x = self.model.token_embedding(text) # [batch_size, n_ctx, d_model] - x = x + self.model.positional_embedding - x = x.permute(1, 0, 2) # NLD -> LND - x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask) - x = x.permute(1, 0, 2) # LND -> NLD - x = self.model.ln_final(x) - return x - - def text_transformer_forward(self, x: torch.Tensor, attn_mask = None): - for i, r in 
enumerate(self.model.transformer.resblocks): - if i == len(self.model.transformer.resblocks) - self.layer_idx: - break - if self.model.transformer.grad_checkpointing and not torch.jit.is_scripting(): - x = checkpoint(r, x, attn_mask) - else: - x = r(x, attn_mask=attn_mask) - return x - - def encode(self, text): - return self(text) - - -class FrozenCLIPT5Encoder(AbstractEncoder): - def __init__(self, clip_version="openai/clip-vit-large-patch14", t5_version="google/t5-v1_1-xl", device="cuda", - clip_max_length=77, t5_max_length=77): - super().__init__() - self.clip_encoder = FrozenCLIPEmbedder(clip_version, device, max_length=clip_max_length) - self.t5_encoder = FrozenT5Embedder(t5_version, device, max_length=t5_max_length) - print(f"{self.clip_encoder.__class__.__name__} has {count_params(self.clip_encoder)*1.e-6:.2f} M parameters, " - f"{self.t5_encoder.__class__.__name__} comes with {count_params(self.t5_encoder)*1.e-6:.2f} M params.") - - def encode(self, text): - return self(text) - - def forward(self, text): - clip_z = self.clip_encoder.encode(text) - t5_z = self.t5_encoder.encode(text) - return [clip_z, t5_z] - - diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/texinfo/serialize.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/texinfo/serialize.go deleted file mode 100644 index e0bce139eac527c0a5c73b31ee6ebf70ef889957..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/texinfo/serialize.go and /dev/null differ diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/markup-macros.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/markup-macros.go deleted file mode 100644 index 1412e5dfb5a069c4615c139859e1b74114e75b29..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/lilypond/2.24.2/ccache/lily/markup-macros.go and /dev/null differ diff --git a/spaces/PeepDaSlan9/AutoGPT/autogpt/processing/__init__.py b/spaces/PeepDaSlan9/AutoGPT/autogpt/processing/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/PeepDaSlan9/EleutherAI-gpt-j-6B-B2BMGMT/README.md b/spaces/PeepDaSlan9/EleutherAI-gpt-j-6B-B2BMGMT/README.md deleted file mode 100644 index 9408ec388d557d046f74e0cdd1beadfd902e66ef..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/EleutherAI-gpt-j-6B-B2BMGMT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: EleutherAI Gpt J 6B B2BMGMT -emoji: 🦀 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Pengyey/bingo-chuchu/src/components/toaster.tsx b/spaces/Pengyey/bingo-chuchu/src/components/toaster.tsx deleted file mode 100644 index 4d2693460b61307a1d4c127fd01df9bee16e59ff..0000000000000000000000000000000000000000 --- a/spaces/Pengyey/bingo-chuchu/src/components/toaster.tsx +++ /dev/null @@ -1,3 +0,0 @@ -'use client' - -export { Toaster } from 'react-hot-toast' diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py b/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py deleted file mode 100644 index 
336c7b254fe392b4703039fec86a83acdbd2e1a5..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py +++ /dev/null @@ -1,35 +0,0 @@ -_base_ = './cityscapes.py' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -crop_size = (769, 769) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', prob=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2049, 1025), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/group_points.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/group_points.py deleted file mode 100644 index 6c3ec9d758ebe4e1c2205882af4be154008253a5..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/ops/group_points.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import Tuple - -import torch -from torch import nn as nn -from torch.autograd import Function - -from ..utils import ext_loader -from .ball_query import ball_query -from .knn import knn - -ext_module = ext_loader.load_ext( - '_ext', ['group_points_forward', 'group_points_backward']) - - -class QueryAndGroup(nn.Module): - """Groups points with a ball query of radius. - - Args: - max_radius (float): The maximum radius of the balls. - If None is given, we will use kNN sampling instead of ball query. - sample_num (int): Maximum number of features to gather in the ball. - min_radius (float, optional): The minimum radius of the balls. - Default: 0. - use_xyz (bool, optional): Whether to use xyz. - Default: True. - return_grouped_xyz (bool, optional): Whether to return grouped xyz. - Default: False. - normalize_xyz (bool, optional): Whether to normalize xyz. - Default: False. - uniform_sample (bool, optional): Whether to sample uniformly. - Default: False - return_unique_cnt (bool, optional): Whether to return the count of - unique samples. Default: False. - return_grouped_idx (bool, optional): Whether to return grouped idx. - Default: False. 
- """ - - def __init__(self, - max_radius, - sample_num, - min_radius=0, - use_xyz=True, - return_grouped_xyz=False, - normalize_xyz=False, - uniform_sample=False, - return_unique_cnt=False, - return_grouped_idx=False): - super().__init__() - self.max_radius = max_radius - self.min_radius = min_radius - self.sample_num = sample_num - self.use_xyz = use_xyz - self.return_grouped_xyz = return_grouped_xyz - self.normalize_xyz = normalize_xyz - self.uniform_sample = uniform_sample - self.return_unique_cnt = return_unique_cnt - self.return_grouped_idx = return_grouped_idx - if self.return_unique_cnt: - assert self.uniform_sample, \ - 'uniform_sample should be True when ' \ - 'returning the count of unique samples' - if self.max_radius is None: - assert not self.normalize_xyz, \ - 'can not normalize grouped xyz when max_radius is None' - - def forward(self, points_xyz, center_xyz, features=None): - """ - Args: - points_xyz (Tensor): (B, N, 3) xyz coordinates of the features. - center_xyz (Tensor): (B, npoint, 3) coordinates of the centriods. - features (Tensor): (B, C, N) Descriptors of the features. - - Returns: - Tensor: (B, 3 + C, npoint, sample_num) Grouped feature. - """ - # if self.max_radius is None, we will perform kNN instead of ball query - # idx is of shape [B, npoint, sample_num] - if self.max_radius is None: - idx = knn(self.sample_num, points_xyz, center_xyz, False) - idx = idx.transpose(1, 2).contiguous() - else: - idx = ball_query(self.min_radius, self.max_radius, self.sample_num, - points_xyz, center_xyz) - - if self.uniform_sample: - unique_cnt = torch.zeros((idx.shape[0], idx.shape[1])) - for i_batch in range(idx.shape[0]): - for i_region in range(idx.shape[1]): - unique_ind = torch.unique(idx[i_batch, i_region, :]) - num_unique = unique_ind.shape[0] - unique_cnt[i_batch, i_region] = num_unique - sample_ind = torch.randint( - 0, - num_unique, (self.sample_num - num_unique, ), - dtype=torch.long) - all_ind = torch.cat((unique_ind, unique_ind[sample_ind])) - idx[i_batch, i_region, :] = all_ind - - xyz_trans = points_xyz.transpose(1, 2).contiguous() - # (B, 3, npoint, sample_num) - grouped_xyz = grouping_operation(xyz_trans, idx) - grouped_xyz_diff = grouped_xyz - \ - center_xyz.transpose(1, 2).unsqueeze(-1) # relative offsets - if self.normalize_xyz: - grouped_xyz_diff /= self.max_radius - - if features is not None: - grouped_features = grouping_operation(features, idx) - if self.use_xyz: - # (B, C + 3, npoint, sample_num) - new_features = torch.cat([grouped_xyz_diff, grouped_features], - dim=1) - else: - new_features = grouped_features - else: - assert (self.use_xyz - ), 'Cannot have not features and not use xyz as a feature!' - new_features = grouped_xyz_diff - - ret = [new_features] - if self.return_grouped_xyz: - ret.append(grouped_xyz) - if self.return_unique_cnt: - ret.append(unique_cnt) - if self.return_grouped_idx: - ret.append(idx) - if len(ret) == 1: - return ret[0] - else: - return tuple(ret) - - -class GroupAll(nn.Module): - """Group xyz with feature. - - Args: - use_xyz (bool): Whether to use xyz. - """ - - def __init__(self, use_xyz: bool = True): - super().__init__() - self.use_xyz = use_xyz - - def forward(self, - xyz: torch.Tensor, - new_xyz: torch.Tensor, - features: torch.Tensor = None): - """ - Args: - xyz (Tensor): (B, N, 3) xyz coordinates of the features. - new_xyz (Tensor): new xyz coordinates of the features. - features (Tensor): (B, C, N) features to group. - - Returns: - Tensor: (B, C + 3, 1, N) Grouped feature. 
- """ - grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) - if features is not None: - grouped_features = features.unsqueeze(2) - if self.use_xyz: - # (B, 3 + C, 1, N) - new_features = torch.cat([grouped_xyz, grouped_features], - dim=1) - else: - new_features = grouped_features - else: - new_features = grouped_xyz - - return new_features - - -class GroupingOperation(Function): - """Group feature with given index.""" - - @staticmethod - def forward(ctx, features: torch.Tensor, - indices: torch.Tensor) -> torch.Tensor: - """ - Args: - features (Tensor): (B, C, N) tensor of features to group. - indices (Tensor): (B, npoint, nsample) the indices of - features to group with. - - Returns: - Tensor: (B, C, npoint, nsample) Grouped features. - """ - features = features.contiguous() - indices = indices.contiguous() - - B, nfeatures, nsample = indices.size() - _, C, N = features.size() - output = torch.cuda.FloatTensor(B, C, nfeatures, nsample) - - ext_module.group_points_forward(B, C, N, nfeatures, nsample, features, - indices, output) - - ctx.for_backwards = (indices, N) - return output - - @staticmethod - def backward(ctx, - grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Args: - grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients - of the output from forward. - - Returns: - Tensor: (B, C, N) gradient of the features. - """ - idx, N = ctx.for_backwards - - B, C, npoint, nsample = grad_out.size() - grad_features = torch.cuda.FloatTensor(B, C, N).zero_() - - grad_out_data = grad_out.data.contiguous() - ext_module.group_points_backward(B, C, N, npoint, nsample, - grad_out_data, idx, - grad_features.data) - return grad_features, None - - -grouping_operation = GroupingOperation.apply diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/utils/weight_init.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/utils/weight_init.py deleted file mode 100644 index 38141ba3d61f64ddfc0a31574b4648cbad96d7dd..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/utils/weight_init.py +++ /dev/null @@ -1,62 +0,0 @@ -"""Modified from https://github.com/rwightman/pytorch-image- -models/blob/master/timm/models/layers/drop.py.""" - -import math -import warnings - -import torch - - -def _no_grad_trunc_normal_(tensor, mean, std, a, b): - """Reference: https://people.sc.fsu.edu/~jburkardt/presentations - /truncated_normal.pdf""" - - def norm_cdf(x): - # Computes standard normal cumulative distribution function - return (1. + math.erf(x / math.sqrt(2.))) / 2. - - if (mean < a - 2 * std) or (mean > b + 2 * std): - warnings.warn( - 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. ' - 'The distribution of values may be incorrect.', - stacklevel=2) - - with torch.no_grad(): - # Values are generated by using a truncated uniform distribution and - # then using the inverse CDF for the normal distribution. - # Get upper and lower cdf values - lower_bound = norm_cdf((a - mean) / std) - upper_bound = norm_cdf((b - mean) / std) - - # Uniformly fill tensor with values from [l, u], then translate to - # [2l-1, 2u-1]. 
- tensor.uniform_(2 * lower_bound - 1, 2 * upper_bound - 1) - - # Use inverse cdf transform for normal distribution to get truncated - # standard normal - tensor.erfinv_() - - # Transform to proper mean, std - tensor.mul_(std * math.sqrt(2.)) - tensor.add_(mean) - - # Clamp to ensure it's in the proper range - tensor.clamp_(min=a, max=b) - return tensor - - -def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): - r"""Fills the input Tensor with values drawn from a truncated - normal distribution. The values are effectively drawn from the - normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` - with values outside :math:`[a, b]` redrawn until they are within - the bounds. The method used for generating the random values works - best when :math:`a \leq \text{mean} \leq b`. - Args: - tensor (``torch.Tensor``): an n-dimensional `torch.Tensor` - mean (float): the mean of the normal distribution - std (float): the standard deviation of the normal distribution - a (float): the minimum cutoff value - b (float): the maximum cutoff value - """ - return _no_grad_trunc_normal_(tensor, mean, std, a, b) diff --git a/spaces/Plachta/VITS-Umamusume-voice-synthesizer/text/symbols.py b/spaces/Plachta/VITS-Umamusume-voice-synthesizer/text/symbols.py deleted file mode 100644 index 053a7105f7ce95aa51614f6995399fa2172b3eb2..0000000000000000000000000000000000000000 --- a/spaces/Plachta/VITS-Umamusume-voice-synthesizer/text/symbols.py +++ /dev/null @@ -1,76 +0,0 @@ -''' -Defines the set of symbols used in text input to the model. -''' - -# japanese_cleaners -_pad = '_' -_punctuation = ',.!?-' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' - - -'''# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' -''' - - -'''# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' -''' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - -'''# zh_ja_mixture_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' -''' - -'''# sanskrit_cleaners -_pad = '_' -_punctuation = '।' -_letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ ' -''' - -'''# cjks_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ ' -''' - -'''# thai_cleaners -_pad = '_' -_punctuation = '.!? 
' -_letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์' -''' - -'''# cjke_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ ' -''' - -'''# shanghainese_cleaners -_pad = '_' -_punctuation = ',.!?…' -_letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 ' -''' - -'''# chinese_dialect_cleaners -_pad = '_' -_punctuation = ',.!?~…─' -_letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚ᴀᴇ↑↓∅ⱼ ' -''' - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") diff --git a/spaces/Poornima-fullstack/PoorniAI/README.md b/spaces/Poornima-fullstack/PoorniAI/README.md deleted file mode 100644 index 6284f98216e0999d9f2ac68c3fbd0ee7514157ae..0000000000000000000000000000000000000000 --- a/spaces/Poornima-fullstack/PoorniAI/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: PoorniAI -emoji: 🐨 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/modules/test_seanet.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/modules/test_seanet.py deleted file mode 100644 index e5c51b340a2f94fb2828b14daf83d5fad645073d..0000000000000000000000000000000000000000 --- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/tests/modules/test_seanet.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
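- # Unit tests for the SEANet encoder/decoder building blocks.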
- -from itertools import product - -import pytest -import torch - -from audiocraft.modules.seanet import SEANetEncoder, SEANetDecoder, SEANetResnetBlock -from audiocraft.modules import StreamableConv1d, StreamableConvTranspose1d - - -class TestSEANetModel: - - def test_base(self): - encoder = SEANetEncoder() - decoder = SEANetDecoder() - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_causal(self): - encoder = SEANetEncoder(causal=True) - decoder = SEANetDecoder(causal=True) - x = torch.randn(1, 1, 24000) - - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_conv_skip_connection(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False) - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_seanet_encoder_decoder_final_act(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False, final_activation='Tanh') - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def _check_encoder_blocks_norm(self, encoder: SEANetEncoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in encoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if n_blocks <= n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - # here we add + 1 to n_blocks as we increment n_blocks just after the block - assert resnet_layer.conv.norm_type == 'none' if (n_blocks + 1) <= n_disable_blocks else norm - - def test_encoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - encoder = SEANetEncoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_encoder_blocks_norm(encoder, disable_blocks, norm) - - def _check_decoder_blocks_norm(self, decoder: SEANetDecoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in decoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, StreamableConvTranspose1d): - n_blocks += 1 - assert layer.convtr.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - assert resnet_layer.conv.norm_type == 'none' \ - if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - - def test_decoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - decoder = SEANetDecoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_decoder_blocks_norm(decoder, disable_blocks, norm) - - def test_disable_norm_raises_exception(self): - # Invalid 
disable_norm_outer_blocks values raise exceptions - with pytest.raises(AssertionError): - SEANetEncoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetEncoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) - - with pytest.raises(AssertionError): - SEANetDecoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetDecoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) diff --git a/spaces/RMXK/RVC_HFF/julius/lowpass.py b/spaces/RMXK/RVC_HFF/julius/lowpass.py deleted file mode 100644 index 0eb46e382b20bfc2d93482f9f027986b863de6f0..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/julius/lowpass.py +++ /dev/null @@ -1,181 +0,0 @@ -# File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details. -# Author: adefossez, 2020 -""" -FIR windowed sinc lowpass filters. -""" - -import math -from typing import Sequence, Optional - -import torch -from torch.nn import functional as F - -from .core import sinc -from .fftconv import fft_conv1d -from .utils import simple_repr - - -class LowPassFilters(torch.nn.Module): - """ - Bank of low pass filters. Note that a high pass or band pass filter can easily - be implemented by substracting a same signal processed with low pass filters with different - frequencies (see `julius.bands.SplitBands` for instance). - This uses a windowed sinc filter, very similar to the one used in - `julius.resample`. However, because we do not change the sample rate here, - this filter can be much more efficiently implemented using the FFT convolution from - `julius.fftconv`. - - Args: - cutoffs (list[float]): list of cutoff frequencies, in [0, 0.5] expressed as `f/f_s` where - f_s is the samplerate and `f` is the cutoff frequency. - The upper limit is 0.5, because a signal sampled at `f_s` contains only - frequencies under `f_s / 2`. - stride (int): how much to decimate the output. Keep in mind that decimation - of the output is only acceptable if the cutoff frequency is under `1/ (2 * stride)` - of the original sampling rate. - pad (bool): if True, appropriately pad the input with zero over the edge. If `stride=1`, - the output will have the same length as the input. - zeros (float): Number of zero crossings to keep. - Controls the receptive field of the Finite Impulse Response filter. - For lowpass filters with low cutoff frequency, e.g. 40Hz at 44.1kHz, - it is a bad idea to set this to a high value. - This is likely appropriate for most use. Lower values - will result in a faster filter, but with a slower attenuation around the - cutoff frequency. - fft (bool or None): if True, uses `julius.fftconv` rather than PyTorch convolutions. - If False, uses PyTorch convolutions. If None, either one will be chosen automatically - depending on the effective filter size. - - - ..warning:: - All the filters will use the same filter size, aligned on the lowest - frequency provided. If you combine a lot of filters with very diverse frequencies, it might - be more efficient to split them over multiple modules with similar frequencies. - - ..note:: - A lowpass with a cutoff frequency of 0 is defined as the null function - by convention here. This allows for a highpass with a cutoff of 0 to - be equal to identity, as defined in `julius.filters.HighPassFilters`. - - Shape: - - - Input: `[*, T]` - - Output: `[F, *, T']`, with `T'=T` if `pad` is True and `stride` is 1, and - `F` is the numer of cutoff frequencies. 
- - >>> lowpass = LowPassFilters([1/4]) - >>> x = torch.randn(4, 12, 21, 1024) - >>> list(lowpass(x).shape) - [1, 4, 12, 21, 1024] - """ - - def __init__(self, cutoffs: Sequence[float], stride: int = 1, pad: bool = True, - zeros: float = 8, fft: Optional[bool] = None): - super().__init__() - self.cutoffs = list(cutoffs) - if min(self.cutoffs) < 0: - raise ValueError("Minimum cutoff must be larger than zero.") - if max(self.cutoffs) > 0.5: - raise ValueError("A cutoff above 0.5 does not make sense.") - self.stride = stride - self.pad = pad - self.zeros = zeros - self.half_size = int(zeros / min([c for c in self.cutoffs if c > 0]) / 2) - if fft is None: - fft = self.half_size > 32 - self.fft = fft - window = torch.hann_window(2 * self.half_size + 1, periodic=False) - time = torch.arange(-self.half_size, self.half_size + 1) - filters = [] - for cutoff in cutoffs: - if cutoff == 0: - filter_ = torch.zeros_like(time) - else: - filter_ = 2 * cutoff * window * sinc(2 * cutoff * math.pi * time) - # Normalize filter to have sum = 1, otherwise we will have a small leakage - # of the constant component in the input signal. - filter_ /= filter_.sum() - filters.append(filter_) - self.register_buffer("filters", torch.stack(filters)[:, None]) - - def forward(self, input): - shape = list(input.shape) - input = input.view(-1, 1, shape[-1]) - if self.pad: - input = F.pad(input, (self.half_size, self.half_size), mode='replicate') - if self.fft: - out = fft_conv1d(input, self.filters, stride=self.stride) - else: - out = F.conv1d(input, self.filters, stride=self.stride) - shape.insert(0, len(self.cutoffs)) - shape[-1] = out.shape[-1] - return out.permute(1, 0, 2).reshape(shape) - - def __repr__(self): - return simple_repr(self) - - -class LowPassFilter(torch.nn.Module): - """ - Same as `LowPassFilters` but applies a single low pass filter. - - Shape: - - - Input: `[*, T]` - - Output: `[*, T']`, with `T'=T` if `pad` is True and `stride` is 1. - - >>> lowpass = LowPassFilter(1/4, stride=2) - >>> x = torch.randn(4, 124) - >>> list(lowpass(x).shape) - [4, 62] - """ - - def __init__(self, cutoff: float, stride: int = 1, pad: bool = True, - zeros: float = 8, fft: Optional[bool] = None): - super().__init__() - self._lowpasses = LowPassFilters([cutoff], stride, pad, zeros, fft) - - @property - def cutoff(self): - return self._lowpasses.cutoffs[0] - - @property - def stride(self): - return self._lowpasses.stride - - @property - def pad(self): - return self._lowpasses.pad - - @property - def zeros(self): - return self._lowpasses.zeros - - @property - def fft(self): - return self._lowpasses.fft - - def forward(self, input): - return self._lowpasses(input)[0] - - def __repr__(self): - return simple_repr(self) - - -def lowpass_filters(input: torch.Tensor, cutoffs: Sequence[float], - stride: int = 1, pad: bool = True, - zeros: float = 8, fft: Optional[bool] = None): - """ - Functional version of `LowPassFilters`, refer to this class for more information. - """ - return LowPassFilters(cutoffs, stride, pad, zeros, fft).to(input)(input) - - -def lowpass_filter(input: torch.Tensor, cutoff: float, - stride: int = 1, pad: bool = True, - zeros: float = 8, fft: Optional[bool] = None): - """ - Same as `lowpass_filters` but with a single cutoff frequency. - Output will not have a dimension inserted in the front. 
- """ - return lowpass_filters(input, [cutoff], stride, pad, zeros, fft)[0] diff --git a/spaces/RMXK/RVC_HFF/lib/infer_pack/models.py b/spaces/RMXK/RVC_HFF/lib/infer_pack/models.py deleted file mode 100644 index ec107476df968e51aafc6c3d102a9ed8c53f141a..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/lib/infer_pack/models.py +++ /dev/null @@ -1,1144 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask 
- - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in 
range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - if uv.device.type == "privateuseone": # for DirectML - uv = uv.float() - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return 
sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = 
self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, 
logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = 
(m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - 
resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, 
y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap \ No newline at end of file diff --git a/spaces/RMeli/gnina-torch/html/wrapper.html b/spaces/RMeli/gnina-torch/html/wrapper.html deleted file mode 100644 index 07867366f680a6a287c14f3aad48c6087873e89b..0000000000000000000000000000000000000000 --- a/spaces/RMeli/gnina-torch/html/wrapper.html +++ /dev/null @@ -1,10 +0,0 @@ - \ No newline at end of 
file diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/padding.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/padding.py deleted file mode 100644 index 1b2204f59f2ce4d9c8f2cca85326e4d81f8805bb..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/padding.py +++ /dev/null @@ -1,141 +0,0 @@ -from typing import cast, List, Optional, Tuple, TYPE_CHECKING, Union - -if TYPE_CHECKING: - from .console import ( - Console, - ConsoleOptions, - RenderableType, - RenderResult, - ) -from .jupyter import JupyterMixin -from .measure import Measurement -from .style import Style -from .segment import Segment - - -PaddingDimensions = Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int, int]] - - -class Padding(JupyterMixin): - """Draw space around content. - - Example: - >>> print(Padding("Hello", (2, 4), style="on blue")) - - Args: - renderable (RenderableType): String or other renderable. - pad (Union[int, Tuple[int]]): Padding for top, right, bottom, and left borders. - May be specified with 1, 2, or 4 integers (CSS style). - style (Union[str, Style], optional): Style for padding characters. Defaults to "none". - expand (bool, optional): Expand padding to fit available width. Defaults to True. - """ - - def __init__( - self, - renderable: "RenderableType", - pad: "PaddingDimensions" = (0, 0, 0, 0), - *, - style: Union[str, Style] = "none", - expand: bool = True, - ): - self.renderable = renderable - self.top, self.right, self.bottom, self.left = self.unpack(pad) - self.style = style - self.expand = expand - - @classmethod - def indent(cls, renderable: "RenderableType", level: int) -> "Padding": - """Make padding instance to render an indent. - - Args: - renderable (RenderableType): String or other renderable. - level (int): Number of characters to indent. - - Returns: - Padding: A Padding instance. 
- """ - - return Padding(renderable, pad=(0, 0, 0, level), expand=False) - - @staticmethod - def unpack(pad: "PaddingDimensions") -> Tuple[int, int, int, int]: - """Unpack padding specified in CSS style.""" - if isinstance(pad, int): - return (pad, pad, pad, pad) - if len(pad) == 1: - _pad = pad[0] - return (_pad, _pad, _pad, _pad) - if len(pad) == 2: - pad_top, pad_right = cast(Tuple[int, int], pad) - return (pad_top, pad_right, pad_top, pad_right) - if len(pad) == 4: - top, right, bottom, left = cast(Tuple[int, int, int, int], pad) - return (top, right, bottom, left) - raise ValueError(f"1, 2 or 4 integers required for padding; {len(pad)} given") - - def __repr__(self) -> str: - return f"Padding({self.renderable!r}, ({self.top},{self.right},{self.bottom},{self.left}))" - - def __rich_console__( - self, console: "Console", options: "ConsoleOptions" - ) -> "RenderResult": - style = console.get_style(self.style) - if self.expand: - width = options.max_width - else: - width = min( - Measurement.get(console, options, self.renderable).maximum - + self.left - + self.right, - options.max_width, - ) - render_options = options.update_width(width - self.left - self.right) - if render_options.height is not None: - render_options = render_options.update_height( - height=render_options.height - self.top - self.bottom - ) - lines = console.render_lines( - self.renderable, render_options, style=style, pad=True - ) - _Segment = Segment - - left = _Segment(" " * self.left, style) if self.left else None - right = ( - [_Segment(f'{" " * self.right}', style), _Segment.line()] - if self.right - else [_Segment.line()] - ) - blank_line: Optional[List[Segment]] = None - if self.top: - blank_line = [_Segment(f'{" " * width}\n', style)] - yield from blank_line * self.top - if left: - for line in lines: - yield left - yield from line - yield from right - else: - for line in lines: - yield from line - yield from right - if self.bottom: - blank_line = blank_line or [_Segment(f'{" " * width}\n', style)] - yield from blank_line * self.bottom - - def __rich_measure__( - self, console: "Console", options: "ConsoleOptions" - ) -> "Measurement": - max_width = options.max_width - extra_width = self.left + self.right - if max_width - extra_width < 1: - return Measurement(max_width, max_width) - measure_min, measure_max = Measurement.get(console, options, self.renderable) - measurement = Measurement(measure_min + extra_width, measure_max + extra_width) - measurement = measurement.with_maximum(max_width) - return measurement - - -if __name__ == "__main__": # pragma: no cover - from pip._vendor.rich import print - - print(Padding("Hello, World", (2, 4), style="on blue")) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/extra_validations.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/extra_validations.py deleted file mode 100644 index 4130a421cfd7260d323b13cbd9d75ab8146e6030..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/config/_validate_pyproject/extra_validations.py +++ /dev/null @@ -1,36 +0,0 @@ -"""The purpose of this module is implement PEP 621 validations that are -difficult to express as a JSON Schema (or that are not supported by the current -JSON Schema library). 
-""" - -from typing import Mapping, TypeVar - -from .error_reporting import ValidationError - -T = TypeVar("T", bound=Mapping) - - -class RedefiningStaticFieldAsDynamic(ValidationError): - """According to PEP 621: - - Build back-ends MUST raise an error if the metadata specifies a field - statically as well as being listed in dynamic. - """ - - -def validate_project_dynamic(pyproject: T) -> T: - project_table = pyproject.get("project", {}) - dynamic = project_table.get("dynamic", []) - - for field in dynamic: - if field in project_table: - msg = f"You cannot provide a value for `project.{field}` and " - msg += "list it under `project.dynamic` at the same time" - name = f"data.project.{field}" - value = {field: project_table[field], "...": " # ...", "dynamic": dynamic} - raise RedefiningStaticFieldAsDynamic(msg, value, name, rule="PEP 621") - - return pyproject - - -EXTRA_VALIDATIONS = (validate_project_dynamic,) diff --git a/spaces/RichardMB1217/blip/models/vit.py b/spaces/RichardMB1217/blip/models/vit.py deleted file mode 100644 index cec3d8e08ed4451d65392feb2e9f4848d1ef3899..0000000000000000000000000000000000000000 --- a/spaces/RichardMB1217/blip/models/vit.py +++ /dev/null @@ -1,305 +0,0 @@ -''' - * Copyright (c) 2022, salesforce.com, inc. - * All rights reserved. - * SPDX-License-Identifier: BSD-3-Clause - * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause - * By Junnan Li - * Based on timm code base - * https://github.com/rwightman/pytorch-image-models/tree/master/timm -''' - -import torch -import torch.nn as nn -import torch.nn.functional as F -from functools import partial - -from timm.models.vision_transformer import _cfg, PatchEmbed -from timm.models.registry import register_model -from timm.models.layers import trunc_normal_, DropPath -from timm.models.helpers import named_apply, adapt_input_conv - -from fairscale.nn.checkpoint.checkpoint_activations import checkpoint_wrapper - -class Mlp(nn.Module): - """ MLP as used in Vision Transformer, MLP-Mixer and related networks - """ - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class Attention(nn.Module): - def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights - self.scale = qk_scale or head_dim ** -0.5 - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - self.attn_gradients = None - self.attention_map = None - - def save_attn_gradients(self, attn_gradients): - self.attn_gradients = attn_gradients - - def get_attn_gradients(self): - return self.attn_gradients - - def save_attention_map(self, attention_map): - self.attention_map = attention_map - - def get_attention_map(self): - return self.attention_map - - def forward(self, x, register_hook=False): - B, N, C = x.shape - qkv = 
self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - attn = (q @ k.transpose(-2, -1)) * self.scale - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - if register_hook: - self.save_attention_map(attn) - attn.register_hook(self.save_attn_gradients) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class Block(nn.Module): - - def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, use_grad_checkpointing=False): - super().__init__() - self.norm1 = norm_layer(dim) - self.attn = Attention( - dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - if use_grad_checkpointing: - self.attn = checkpoint_wrapper(self.attn) - self.mlp = checkpoint_wrapper(self.mlp) - - def forward(self, x, register_hook=False): - x = x + self.drop_path(self.attn(self.norm1(x), register_hook=register_hook)) - x = x + self.drop_path(self.mlp(self.norm2(x))) - return x - - -class VisionTransformer(nn.Module): - """ Vision Transformer - A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - - https://arxiv.org/abs/2010.11929 - """ - def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, - num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, - drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=None, - use_grad_checkpointing=False, ckpt_layer=0): - """ - Args: - img_size (int, tuple): input image size - patch_size (int, tuple): patch size - in_chans (int): number of input channels - num_classes (int): number of classes for classification head - embed_dim (int): embedding dimension - depth (int): depth of transformer - num_heads (int): number of attention heads - mlp_ratio (int): ratio of mlp hidden dim to embedding dim - qkv_bias (bool): enable bias for qkv if True - qk_scale (float): override default qk scale of head_dim ** -0.5 if set - representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set - drop_rate (float): dropout rate - attn_drop_rate (float): attention dropout rate - drop_path_rate (float): stochastic depth rate - norm_layer: (nn.Module): normalization layer - """ - super().__init__() - self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models - norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) - - self.patch_embed = PatchEmbed( - img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) - - num_patches = self.patch_embed.num_patches - - self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) - self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) - self.pos_drop = nn.Dropout(p=drop_rate) - - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule - self.blocks = nn.ModuleList([ - Block( - dim=embed_dim, 
num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, - use_grad_checkpointing=(use_grad_checkpointing and i>=depth-ckpt_layer) - ) - for i in range(depth)]) - self.norm = norm_layer(embed_dim) - - trunc_normal_(self.pos_embed, std=.02) - trunc_normal_(self.cls_token, std=.02) - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - @torch.jit.ignore - def no_weight_decay(self): - return {'pos_embed', 'cls_token'} - - def forward(self, x, register_blk=-1): - B = x.shape[0] - x = self.patch_embed(x) - - cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks - x = torch.cat((cls_tokens, x), dim=1) - - x = x + self.pos_embed[:,:x.size(1),:] - x = self.pos_drop(x) - - for i,blk in enumerate(self.blocks): - x = blk(x, register_blk==i) - x = self.norm(x) - - return x - - @torch.jit.ignore() - def load_pretrained(self, checkpoint_path, prefix=''): - _load_weights(self, checkpoint_path, prefix) - - -@torch.no_grad() -def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''): - """ Load weights from .npz checkpoints for official Google Brain Flax implementation - """ - import numpy as np - - def _n2p(w, t=True): - if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1: - w = w.flatten() - if t: - if w.ndim == 4: - w = w.transpose([3, 2, 0, 1]) - elif w.ndim == 3: - w = w.transpose([2, 0, 1]) - elif w.ndim == 2: - w = w.transpose([1, 0]) - return torch.from_numpy(w) - - w = np.load(checkpoint_path) - if not prefix and 'opt/target/embedding/kernel' in w: - prefix = 'opt/target/' - - if hasattr(model.patch_embed, 'backbone'): - # hybrid - backbone = model.patch_embed.backbone - stem_only = not hasattr(backbone, 'stem') - stem = backbone if stem_only else backbone.stem - stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel']))) - stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale'])) - stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias'])) - if not stem_only: - for i, stage in enumerate(backbone.stages): - for j, block in enumerate(stage.blocks): - bp = f'{prefix}block{i + 1}/unit{j + 1}/' - for r in range(3): - getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel'])) - getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale'])) - getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias'])) - if block.downsample is not None: - block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel'])) - block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale'])) - block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias'])) - embed_conv_w = _n2p(w[f'{prefix}embedding/kernel']) - else: - embed_conv_w = adapt_input_conv( - model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel'])) - model.patch_embed.proj.weight.copy_(embed_conv_w) - model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias'])) - model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False)) - pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False) - if pos_embed_w.shape != model.pos_embed.shape: - pos_embed_w = resize_pos_embed( # resize pos 
embedding when different size from pretrained weights - pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size) - model.pos_embed.copy_(pos_embed_w) - model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale'])) - model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias'])) -# if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]: -# model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel'])) -# model.head.bias.copy_(_n2p(w[f'{prefix}head/bias'])) -# if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w: -# model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel'])) -# model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias'])) - for i, block in enumerate(model.blocks.children()): - block_prefix = f'{prefix}Transformer/encoderblock_{i}/' - mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/' - block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale'])) - block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias'])) - block.attn.qkv.weight.copy_(torch.cat([ - _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')])) - block.attn.qkv.bias.copy_(torch.cat([ - _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')])) - block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1)) - block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias'])) - for r in range(2): - getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel'])) - getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias'])) - block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale'])) - block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias'])) - - -def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder): - # interpolate position embedding - embedding_size = pos_embed_checkpoint.shape[-1] - num_patches = visual_encoder.patch_embed.num_patches - num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches - # height (== width) for the checkpoint position embedding - orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) - # height (== width) for the new position embedding - new_size = int(num_patches ** 0.5) - - if orig_size!=new_size: - # class_token and dist_token are kept unchanged - extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] - # only the position tokens are interpolated - pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] - pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) - pos_tokens = torch.nn.functional.interpolate( - pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) - pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) - new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) - print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2)) - - return new_pos_embed - else: - return pos_embed_checkpoint \ No newline at end of file diff --git a/spaces/Rifd/ngees_doang/README.md b/spaces/Rifd/ngees_doang/README.md deleted file mode 100644 index ccf3f6f6dcf64e1f7074aba853da69b5aaa73781..0000000000000000000000000000000000000000 --- a/spaces/Rifd/ngees_doang/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Webui-Cpu-Publictest-AnimemodelsV2-Plus-OrangeMixs-Embed -emoji: 🌍 -colorFrom: purple -colorTo: 
purple -sdk: gradio -sdk_version: 3.9 -app_file: app.py -pinned: true -duplicated_from: Osmond141319/Webui-Cpu-Publictest-AnimemodelsV2-Plus-OrangeMixs-Embed ---- diff --git a/spaces/Riksarkivet/htr_demo/Makefile b/spaces/Riksarkivet/htr_demo/Makefile deleted file mode 100644 index 1e549e3b454e5429bd034999e8932392b435d004..0000000000000000000000000000000000000000 --- a/spaces/Riksarkivet/htr_demo/Makefile +++ /dev/null @@ -1,45 +0,0 @@ -.PHONY: install - -venv: - python -m venv venv - - -activate: - source ./venv/bin/activate - -install: local_install install_openmmlab - -docker_install: local_install install_openmmlab_with_mim - -local_install: - @echo "Running requirements install" - pip install --upgrade pip - pip install -r requirements.txt - -install_openmmlab_with_mim: - @echo "Running Openmmlab requirements install" - pip install -U openmim - mim install mmengine - mim install mmcv - mim install mmdet - mim install mmocr - -install_openmmlab: - @echo "Running Openmmlab requirements install" - pip install mmengine==0.7.4 - pip install mmcv==2.0.1 - pip install mmdet==3.0.0 - pip install mmocr==1.0.0 - -build: - pip install -e . - gradio app.py - -# clean_for_actions: -# git lfs prune -# git filter-branch --force --index-filter "git rm --cached --ignore-unmatch helper/text/videos/eating_spaghetti.mp4" --prune-empty --tag-name-filter cat -- --all -# git push --force origin main - -# add_space: -# git remote add demo https://huggingface.co/spaces/Riksarkivet/htr_demo -# git push --force demo main \ No newline at end of file diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/datasets/pipelines/formating.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/datasets/pipelines/formating.py deleted file mode 100644 index 5781341bd48766a740f23ebba7a85cf8993642d7..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/datasets/pipelines/formating.py +++ /dev/null @@ -1,364 +0,0 @@ -from collections.abc import Sequence - -import mmcv -import numpy as np -import torch -from mmcv.parallel import DataContainer as DC - -from ..builder import PIPELINES - - -def to_tensor(data): - """Convert objects of various python types to :obj:`torch.Tensor`. - - Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, - :class:`Sequence`, :class:`int` and :class:`float`. - - Args: - data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to - be converted. - """ - - if isinstance(data, torch.Tensor): - return data - elif isinstance(data, np.ndarray): - return torch.from_numpy(data) - elif isinstance(data, Sequence) and not mmcv.is_str(data): - return torch.tensor(data) - elif isinstance(data, int): - return torch.LongTensor([data]) - elif isinstance(data, float): - return torch.FloatTensor([data]) - else: - raise TypeError(f'type {type(data)} cannot be converted to tensor.') - - -@PIPELINES.register_module() -class ToTensor(object): - """Convert some results to :obj:`torch.Tensor` by given keys. - - Args: - keys (Sequence[str]): Keys that need to be converted to Tensor. - """ - - def __init__(self, keys): - self.keys = keys - - def __call__(self, results): - """Call function to convert data in results to :obj:`torch.Tensor`. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data converted - to :obj:`torch.Tensor`. 
- """ - for key in self.keys: - results[key] = to_tensor(results[key]) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(keys={self.keys})' - - -@PIPELINES.register_module() -class ImageToTensor(object): - """Convert image to :obj:`torch.Tensor` by given keys. - - The dimension order of input image is (H, W, C). The pipeline will convert - it to (C, H, W). If only 2 dimension (H, W) is given, the output would be - (1, H, W). - - Args: - keys (Sequence[str]): Key of images to be converted to Tensor. - """ - - def __init__(self, keys): - self.keys = keys - - def __call__(self, results): - """Call function to convert image in results to :obj:`torch.Tensor` and - transpose the channel order. - - Args: - results (dict): Result dict contains the image data to convert. - - Returns: - dict: The result dict contains the image converted - to :obj:`torch.Tensor` and transposed to (C, H, W) order. - """ - for key in self.keys: - img = results[key] - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - results[key] = to_tensor(img.transpose(2, 0, 1)) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(keys={self.keys})' - - -@PIPELINES.register_module() -class Transpose(object): - """Transpose some results by given keys. - - Args: - keys (Sequence[str]): Keys of results to be transposed. - order (Sequence[int]): Order of transpose. - """ - - def __init__(self, keys, order): - self.keys = keys - self.order = order - - def __call__(self, results): - """Call function to transpose the channel order of data in results. - - Args: - results (dict): Result dict contains the data to transpose. - - Returns: - dict: The result dict contains the data transposed to \ - ``self.order``. - """ - for key in self.keys: - results[key] = results[key].transpose(self.order) - return results - - def __repr__(self): - return self.__class__.__name__ + \ - f'(keys={self.keys}, order={self.order})' - - -@PIPELINES.register_module() -class ToDataContainer(object): - """Convert results to :obj:`mmcv.DataContainer` by given fields. - - Args: - fields (Sequence[dict]): Each field is a dict like - ``dict(key='xxx', **kwargs)``. The ``key`` in result will - be converted to :obj:`mmcv.DataContainer` with ``**kwargs``. - Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'), - dict(key='gt_labels'))``. - """ - - def __init__(self, - fields=(dict(key='img', stack=True), dict(key='gt_bboxes'), - dict(key='gt_labels'))): - self.fields = fields - - def __call__(self, results): - """Call function to convert data in results to - :obj:`mmcv.DataContainer`. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data converted to \ - :obj:`mmcv.DataContainer`. - """ - - for field in self.fields: - field = field.copy() - key = field.pop('key') - results[key] = DC(results[key], **field) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(fields={self.fields})' - - -@PIPELINES.register_module() -class DefaultFormatBundle(object): - """Default formatting bundle. - - It simplifies the pipeline of formatting common fields, including "img", - "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg". - These fields are formatted as follows. 
- - - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) - - proposals: (1)to tensor, (2)to DataContainer - - gt_bboxes: (1)to tensor, (2)to DataContainer - - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer - - gt_labels: (1)to tensor, (2)to DataContainer - - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True) - - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \ - (3)to DataContainer (stack=True) - """ - - def __call__(self, results): - """Call function to transform and format common fields in results. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - dict: The result dict contains the data that is formatted with \ - default bundle. - """ - - if 'img' in results: - img = results['img'] - # add default meta keys - results = self._add_default_meta_keys(results) - if len(img.shape) < 3: - img = np.expand_dims(img, -1) - img = np.ascontiguousarray(img.transpose(2, 0, 1)) - results['img'] = DC(to_tensor(img), stack=True) - for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']: - if key not in results: - continue - results[key] = DC(to_tensor(results[key])) - if 'gt_masks' in results: - results['gt_masks'] = DC(results['gt_masks'], cpu_only=True) - if 'gt_semantic_seg' in results: - results['gt_semantic_seg'] = DC( - to_tensor(results['gt_semantic_seg'][None, ...]), stack=True) - return results - - def _add_default_meta_keys(self, results): - """Add default meta keys. - - We set default meta keys including `pad_shape`, `scale_factor` and - `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and - `Pad` are implemented during the whole pipeline. - - Args: - results (dict): Result dict contains the data to convert. - - Returns: - results (dict): Updated result dict contains the data to convert. - """ - img = results['img'] - results.setdefault('pad_shape', img.shape) - results.setdefault('scale_factor', 1.0) - num_channels = 1 if len(img.shape) < 3 else img.shape[2] - results.setdefault( - 'img_norm_cfg', - dict( - mean=np.zeros(num_channels, dtype=np.float32), - std=np.ones(num_channels, dtype=np.float32), - to_rgb=False)) - return results - - def __repr__(self): - return self.__class__.__name__ - - -@PIPELINES.register_module() -class Collect(object): - """Collect data from the loader relevant to the specific task. - - This is usually the last stage of the data loader pipeline. Typically keys - is set to some subset of "img", "proposals", "gt_bboxes", - "gt_bboxes_ignore", "gt_labels", and/or "gt_masks". - - The "img_meta" item is always populated. The contents of the "img_meta" - dictionary depends on "meta_keys". By default this includes: - - - "img_shape": shape of the image input to the network as a tuple \ - (h, w, c). Note that images may be zero padded on the \ - bottom/right if the batch tensor is larger than this shape. - - - "scale_factor": a float indicating the preprocessing scale - - - "flip": a boolean indicating if image flip transform was used - - - "filename": path to the image file - - - "ori_shape": original shape of the image as a tuple (h, w, c) - - - "pad_shape": image shape after padding - - - "img_norm_cfg": a dict of normalization information: - - - mean - per channel mean subtraction - - std - per channel std divisor - - to_rgb - bool indicating if bgr was converted to rgb - - Args: - keys (Sequence[str]): Keys of results to be collected in ``data``. 
- meta_keys (Sequence[str], optional): Meta keys to be converted to - ``mmcv.DataContainer`` and collected in ``data[img_metas]``. - Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape', - 'pad_shape', 'scale_factor', 'flip', 'flip_direction', - 'img_norm_cfg')`` - """ - - def __init__(self, - keys, - meta_keys=('filename', 'ori_filename', 'ori_shape', - 'img_shape', 'pad_shape', 'scale_factor', 'flip', - 'flip_direction', 'img_norm_cfg')): - self.keys = keys - self.meta_keys = meta_keys - - def __call__(self, results): - """Call function to collect keys in results. The keys in ``meta_keys`` - will be converted to :obj:mmcv.DataContainer. - - Args: - results (dict): Result dict contains the data to collect. - - Returns: - dict: The result dict contains the following keys - - - keys in``self.keys`` - - ``img_metas`` - """ - - data = {} - img_meta = {} - for key in self.meta_keys: - img_meta[key] = results[key] - data['img_metas'] = DC(img_meta, cpu_only=True) - for key in self.keys: - data[key] = results[key] - return data - - def __repr__(self): - return self.__class__.__name__ + \ - f'(keys={self.keys}, meta_keys={self.meta_keys})' - - -@PIPELINES.register_module() -class WrapFieldsToLists(object): - """Wrap fields of the data dictionary into lists for evaluation. - - This class can be used as a last step of a test or validation - pipeline for single image evaluation or inference. - - Example: - >>> test_pipeline = [ - >>> dict(type='LoadImageFromFile'), - >>> dict(type='Normalize', - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - to_rgb=True), - >>> dict(type='Pad', size_divisor=32), - >>> dict(type='ImageToTensor', keys=['img']), - >>> dict(type='Collect', keys=['img']), - >>> dict(type='WrapFieldsToLists') - >>> ] - """ - - def __call__(self, results): - """Call function to wrap fields into lists. - - Args: - results (dict): Result dict contains the data to wrap. - - Returns: - dict: The result dict where value of ``self.keys`` are wrapped \ - into list. 
- """ - - # Wrap dict fields into lists - for key, val in results.items(): - results[key] = [val] - return results - - def __repr__(self): - return f'{self.__class__.__name__}()' diff --git a/spaces/Rongjiehuang/ProDiff/vocoders/__init__.py b/spaces/Rongjiehuang/ProDiff/vocoders/__init__.py deleted file mode 100644 index 50e4abf21d1cd113f65d353f0101e3550de3bac3..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/ProDiff/vocoders/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from vocoders import hifigan -from vocoders import fastdiff diff --git a/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/SPPE/src/models/hgPRM.py b/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/SPPE/src/models/hgPRM.py deleted file mode 100644 index d115071735f81cfd3bbdc34d43e1b475b0b8fc8d..0000000000000000000000000000000000000000 --- a/spaces/Sapphire-356/Video2MC/joints_detectors/Alphapose/SPPE/src/models/hgPRM.py +++ /dev/null @@ -1,236 +0,0 @@ -import torch.nn as nn -from .layers.PRM import Residual as ResidualPyramid -from .layers.Residual import Residual as Residual -from torch.autograd import Variable -import torch -from opt import opt -import math - - -class Hourglass(nn.Module): - def __init__(self, n, nFeats, nModules, inputResH, inputResW, net_type, B, C): - super(Hourglass, self).__init__() - - self.ResidualUp = ResidualPyramid if n >= 2 else Residual - self.ResidualDown = ResidualPyramid if n >= 3 else Residual - - self.depth = n - self.nModules = nModules - self.nFeats = nFeats - self.net_type = net_type - self.B = B - self.C = C - self.inputResH = inputResH - self.inputResW = inputResW - - up1 = self._make_residual(self.ResidualUp, False, inputResH, inputResW) - low1 = nn.Sequential( - nn.MaxPool2d(2), - self._make_residual(self.ResidualDown, False, inputResH / 2, inputResW / 2) - ) - if n > 1: - low2 = Hourglass(n - 1, nFeats, nModules, inputResH / 2, inputResW / 2, net_type, B, C) - else: - low2 = self._make_residual(self.ResidualDown, False, inputResH / 2, inputResW / 2) - - low3 = self._make_residual(self.ResidualDown, True, inputResH / 2, inputResW / 2) - up2 = nn.UpsamplingNearest2d(scale_factor=2) - - self.upperBranch = up1 - self.lowerBranch = nn.Sequential( - low1, - low2, - low3, - up2 - ) - - def _make_residual(self, resBlock, useConv, inputResH, inputResW): - layer_list = [] - for i in range(self.nModules): - layer_list.append(resBlock(self.nFeats, self.nFeats, inputResH, inputResW, - stride=1, net_type=self.net_type, useConv=useConv, - baseWidth=self.B, cardinality=self.C)) - return nn.Sequential(*layer_list) - - def forward(self, x: Variable): - up1 = self.upperBranch(x) - up2 = self.lowerBranch(x) - # out = up1 + up2 - out = torch.add(up1, up2) - return out - - -class PyraNet(nn.Module): - def __init__(self): - super(PyraNet, self).__init__() - - B, C = opt.baseWidth, opt.cardinality - self.inputResH = opt.inputResH / 4 - self.inputResW = opt.inputResW / 4 - self.nStack = opt.nStack - - conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3) - if opt.init: - nn.init.xavier_normal(conv1.weight, gain=math.sqrt(1 / 3)) - - cnv1 = nn.Sequential( - conv1, - nn.BatchNorm2d(64), - nn.ReLU(True) - ) - - r1 = nn.Sequential( - ResidualPyramid(64, 128, opt.inputResH / 2, opt.inputResW / 2, - stride=1, net_type='no_preact', useConv=False, baseWidth=B, cardinality=C), - nn.MaxPool2d(2) - ) - r4 = ResidualPyramid(128, 128, self.inputResH, self.inputResW, - stride=1, net_type='preact', useConv=False, baseWidth=B, cardinality=C) - r5 = ResidualPyramid(128, opt.nFeats, 
self.inputResH, self.inputResW, - stride=1, net_type='preact', useConv=False, baseWidth=B, cardinality=C) - self.preact = nn.Sequential( - cnv1, - r1, - r4, - r5 - ) - - self.stack_lin = nn.ModuleList() - self.stack_out = nn.ModuleList() - self.stack_lin_ = nn.ModuleList() - self.stack_out_ = nn.ModuleList() - - for i in range(self.nStack): - hg = Hourglass(4, opt.nFeats, opt.nResidual, self.inputResH, self.inputResW, 'preact', B, C) - conv1 = nn.Conv2d(opt.nFeats, opt.nFeats, kernel_size=1, stride=1, padding=0) - if opt.init: - nn.init.xavier_normal(conv1.weight, gain=math.sqrt(1 / 2)) - lin = nn.Sequential( - hg, - nn.BatchNorm2d(opt.nFeats), - nn.ReLU(True), - conv1, - nn.BatchNorm2d(opt.nFeats), - nn.ReLU(True) - ) - tmpOut = nn.Conv2d(opt.nFeats, opt.nClasses, kernel_size=1, stride=1, padding=0) - if opt.init: - nn.init.xavier_normal(tmpOut.weight) - self.stack_lin.append(lin) - self.stack_out.append(tmpOut) - if i < self.nStack - 1: - lin_ = nn.Conv2d(opt.nFeats, opt.nFeats, kernel_size=1, stride=1, padding=0) - tmpOut_ = nn.Conv2d(opt.nClasses, opt.nFeats, kernel_size=1, stride=1, padding=0) - if opt.init: - nn.init.xavier_normal(lin_.weight) - nn.init.xavier_normal(tmpOut_.weight) - self.stack_lin_.append(lin_) - self.stack_out_.append(tmpOut_) - - def forward(self, x: Variable): - out = [] - inter = self.preact(x) - for i in range(self.nStack): - lin = self.stack_lin[i](inter) - tmpOut = self.stack_out[i](lin) - out.append(tmpOut) - if i < self.nStack - 1: - lin_ = self.stack_lin_[i](lin) - tmpOut_ = self.stack_out_[i](tmpOut) - inter = inter + lin_ + tmpOut_ - return out - - -class PyraNet_Inference(nn.Module): - def __init__(self): - super(PyraNet_Inference, self).__init__() - - B, C = opt.baseWidth, opt.cardinality - self.inputResH = opt.inputResH / 4 - self.inputResW = opt.inputResW / 4 - self.nStack = opt.nStack - - conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3) - if opt.init: - nn.init.xavier_normal(conv1.weight, gain=math.sqrt(1 / 3)) - - cnv1 = nn.Sequential( - conv1, - nn.BatchNorm2d(64), - nn.ReLU(True) - ) - - r1 = nn.Sequential( - ResidualPyramid(64, 128, opt.inputResH / 2, opt.inputResW / 2, - stride=1, net_type='no_preact', useConv=False, baseWidth=B, cardinality=C), - nn.MaxPool2d(2) - ) - r4 = ResidualPyramid(128, 128, self.inputResH, self.inputResW, - stride=1, net_type='preact', useConv=False, baseWidth=B, cardinality=C) - r5 = ResidualPyramid(128, opt.nFeats, self.inputResH, self.inputResW, - stride=1, net_type='preact', useConv=False, baseWidth=B, cardinality=C) - self.preact = nn.Sequential( - cnv1, - r1, - r4, - r5 - ) - - self.stack_lin = nn.ModuleList() - self.stack_out = nn.ModuleList() - self.stack_lin_ = nn.ModuleList() - self.stack_out_ = nn.ModuleList() - - for i in range(self.nStack): - hg = Hourglass(4, opt.nFeats, opt.nResidual, - self.inputResH, self.inputResW, 'preact', B, C) - conv1 = nn.Conv2d(opt.nFeats, opt.nFeats, - kernel_size=1, stride=1, padding=0) - if opt.init: - nn.init.xavier_normal(conv1.weight, gain=math.sqrt(1 / 2)) - lin = nn.Sequential( - hg, - nn.BatchNorm2d(opt.nFeats), - nn.ReLU(True), - conv1, - nn.BatchNorm2d(opt.nFeats), - nn.ReLU(True) - ) - tmpOut = nn.Conv2d(opt.nFeats, opt.nClasses, - kernel_size=1, stride=1, padding=0) - if opt.init: - nn.init.xavier_normal(tmpOut.weight) - self.stack_lin.append(lin) - self.stack_out.append(tmpOut) - if i < self.nStack - 1: - lin_ = nn.Conv2d(opt.nFeats, opt.nFeats, - kernel_size=1, stride=1, padding=0) - tmpOut_ = nn.Conv2d(opt.nClasses, opt.nFeats, - kernel_size=1, 
stride=1, padding=0) - if opt.init: - nn.init.xavier_normal(lin_.weight) - nn.init.xavier_normal(tmpOut_.weight) - self.stack_lin_.append(lin_) - self.stack_out_.append(tmpOut_) - - def forward(self, x: Variable): - inter = self.preact(x) - for i in range(self.nStack): - lin = self.stack_lin[i](inter) - tmpOut = self.stack_out[i](lin) - out = tmpOut - if i < self.nStack - 1: - lin_ = self.stack_lin_[i](lin) - tmpOut_ = self.stack_out_[i](tmpOut) - inter = inter + lin_ + tmpOut_ - return out - - -def createModel(**kw): - model = PyraNet() - return model - - -def createModel_Inference(**kw): - model = PyraNet_Inference() - return model diff --git a/spaces/SeViLA/SeViLA/app/text_localization.py b/spaces/SeViLA/SeViLA/app/text_localization.py deleted file mode 100644 index d01655b97d7c0e495caf42c81c83b59e1bc3c811..0000000000000000000000000000000000000000 --- a/spaces/SeViLA/SeViLA/app/text_localization.py +++ /dev/null @@ -1,105 +0,0 @@ -""" - # Copyright (c) 2022, salesforce.com, inc. - # All rights reserved. - # SPDX-License-Identifier: BSD-3-Clause - # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import math - -import numpy as np -import streamlit as st -from lavis.models.blip_models.blip_image_text_matching import compute_gradcam -from lavis.processors import load_processor -from PIL import Image - -from app import device, load_demo_image -from app.utils import getAttMap, init_bert_tokenizer, load_blip_itm_model - - -def app(): - model_type = st.sidebar.selectbox("Model:", ["BLIP_base", "BLIP_large"]) - - values = list(range(1, 12)) - default_layer_num = values.index(7) - layer_num = ( - st.sidebar.selectbox("Layer number", values, index=default_layer_num) - 1 - ) - - st.markdown( - "

<h1 style='text-align: center;'>Text Localization</h1>
      ", unsafe_allow_html=True - ) - - vis_processor = load_processor("blip_image_eval").build(image_size=384) - text_processor = load_processor("blip_caption") - - tokenizer = init_bert_tokenizer() - - instructions = "Try the provided image and text or use your own ones." - file = st.file_uploader(instructions) - - query = st.text_input( - "Try a different input.", "A girl playing with her dog on the beach." - ) - - submit_button = st.button("Submit") - - col1, col2 = st.columns(2) - - if file: - raw_img = Image.open(file).convert("RGB") - else: - raw_img = load_demo_image() - - col1.header("Image") - w, h = raw_img.size - scaling_factor = 720 / w - resized_image = raw_img.resize((int(w * scaling_factor), int(h * scaling_factor))) - col1.image(resized_image, use_column_width=True) - - col2.header("GradCam") - - if submit_button: - if model_type.startswith("BLIP"): - blip_type = model_type.split("_")[1] - model = load_blip_itm_model(device, model_type=blip_type) - - img = vis_processor(raw_img).unsqueeze(0).to(device) - qry = text_processor(query) - - qry_tok = tokenizer(qry, return_tensors="pt").to(device) - - norm_img = np.float32(resized_image) / 255 - - gradcam, _ = compute_gradcam(model, img, qry, qry_tok, block_num=layer_num) - - avg_gradcam = getAttMap(norm_img, gradcam[0][1], blur=True) - col2.image(avg_gradcam, use_column_width=True, clamp=True) - - num_cols = 4.0 - num_tokens = len(qry_tok.input_ids[0]) - 2 - - num_rows = int(math.ceil(num_tokens / num_cols)) - - gradcam_iter = iter(gradcam[0][2:-1]) - token_id_iter = iter(qry_tok.input_ids[0][1:-1]) - - for _ in range(num_rows): - with st.container(): - for col in st.columns(int(num_cols)): - token_id = next(token_id_iter, None) - if not token_id: - break - gradcam_img = next(gradcam_iter) - - word = tokenizer.decode([token_id]) - gradcam_todraw = getAttMap(norm_img, gradcam_img, blur=True) - - new_title = ( - '

<p style="text-align: center;">{}</p>
      '.format( - word - ) - ) - col.markdown(new_title, unsafe_allow_html=True) - # st.image(image, channels="BGR") - col.image(gradcam_todraw, use_column_width=True, clamp=True) diff --git a/spaces/SerdarHelli/Pix2Pix3D/app.py b/spaces/SerdarHelli/Pix2Pix3D/app.py deleted file mode 100644 index b45d5bd9c3c9afb43cd03b401f0de22a1cf61f50..0000000000000000000000000000000000000000 --- a/spaces/SerdarHelli/Pix2Pix3D/app.py +++ /dev/null @@ -1,328 +0,0 @@ -import sys -import os - -os.system("git clone https://github.com/dunbar12138/pix2pix3D.git") -sys.path.append("pix2pix3D") - -from typing import List, Optional, Tuple, Union -import dnnlib -import numpy as np -import PIL.Image -import torch -from tqdm import tqdm - -import legacy -from camera_utils import LookAtPoseSampler -from huggingface_hub import hf_hub_download -from matplotlib import pyplot as plt -from pathlib import Path -import gradio as gr -from training.utils import color_mask, color_list -import plotly.graph_objects as go -from tqdm import tqdm -import imageio -import trimesh -import mcubes -import copy - -import pickle -import numpy as np -import torch -import dnnlib -from torch_utils import misc -from legacy import * -import io - -os.environ["PYOPENGL_PLATFORM"] = "egl" - - -def get_sigma_field_np(nerf, styles, resolution=512, block_resolution=64): - # return numpy array of forwarded sigma value - # bound = (nerf.rendering_kwargs['ray_end'] - nerf.rendering_kwargs['ray_start']) * 0.5 - bound = nerf.rendering_kwargs['box_warp'] * 0.5 - X = torch.linspace(-bound, bound, resolution).split(block_resolution) - - sigma_np = np.zeros([resolution, resolution, resolution], dtype=np.float32) - - for xi, xs in enumerate(X): - for yi, ys in enumerate(X): - for zi, zs in enumerate(X): - xx, yy, zz = torch.meshgrid(xs, ys, zs) - pts = torch.stack([xx, yy, zz], dim=-1).unsqueeze(0).to(styles.device) # B, H, H, H, C - block_shape = [1, len(xs), len(ys), len(zs)] - out = nerf.sample_mixed(pts.reshape(1,-1,3), None, ws=styles, noise_mode='const') - feat_out, sigma_out = out['rgb'], out['sigma'] - sigma_np[xi * block_resolution: xi * block_resolution + len(xs), \ - yi * block_resolution: yi * block_resolution + len(ys), \ - zi * block_resolution: zi * block_resolution + len(zs)] = sigma_out.reshape(block_shape[1:]).detach().cpu().numpy() - # print(feat_out.shape) - - return sigma_np, bound - - -def extract_geometry(nerf, styles, resolution, threshold): - - # print('threshold: {}'.format(threshold)) - u, bound = get_sigma_field_np(nerf, styles, resolution) - vertices, faces = mcubes.marching_cubes(u, threshold) - # vertices, faces, normals, values = skimage.measure.marching_cubes( - # u, level=10 - # ) - b_min_np = np.array([-bound, -bound, -bound]) - b_max_np = np.array([ bound, bound, bound]) - - vertices = vertices / (resolution - 1.0) * (b_max_np - b_min_np)[None, :] + b_min_np[None, :] - return vertices.astype('float32'), faces -def render_video(G, ws, intrinsics, num_frames = 120, pitch_range = 0.25, yaw_range = 0.35, neural_rendering_resolution = 128, device='cuda'): - frames, frames_label = [], [] - - for frame_idx in tqdm(range(num_frames)): - cam2world_pose = LookAtPoseSampler.sample(3.14/2 + yaw_range * np.sin(2 * 3.14 * frame_idx / num_frames), - 3.14/2 -0.05 + pitch_range * np.cos(2 * 3.14 * frame_idx / num_frames), - torch.tensor(G.rendering_kwargs['avg_camera_pivot'], device=device), radius=G.rendering_kwargs['avg_camera_radius'], device=device) - pose = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) - 
with torch.no_grad(): - # out = G(z, pose, {'mask': batch['mask'].unsqueeze(0).to(device), 'pose': torch.tensor(batch['pose']).unsqueeze(0).to(device)}) - out = G.synthesis(ws, pose, noise_mode='const', neural_rendering_resolution=neural_rendering_resolution) - frames.append(((out['image'].cpu().numpy()[0] + 1) * 127.5).clip(0, 255).astype(np.uint8).transpose(1, 2, 0)) - frames_label.append(color_mask(torch.argmax(out['semantic'], dim=1).cpu().numpy()[0]).astype(np.uint8)) - - return frames, frames_label - -def return_plot_go(mesh_trimesh): - x=np.asarray(mesh_trimesh.vertices).T[0] - y=np.asarray(mesh_trimesh.vertices).T[1] - z=np.asarray(mesh_trimesh.vertices).T[2] - - i=np.asarray(mesh_trimesh.faces).T[0] - j=np.asarray(mesh_trimesh.faces).T[1] - k=np.asarray(mesh_trimesh.faces).T[2] - fig = go.Figure(go.Mesh3d(x=x, y=y, z=z, - i=i, j=j, k=k, - vertexcolor=np.asarray(mesh_trimesh.visual.vertex_colors) , - lighting=dict(ambient=0.5, - diffuse=1, - fresnel=4, - specular=0.5, - roughness=0.05, - facenormalsepsilon=0, - vertexnormalsepsilon=0), - lightposition=dict(x=100, - y=100, - z=1000))) - return fig - - - -network_cat=hf_hub_download("SerdarHelli/pix2pix3d_seg2cat", filename="pix2pix3d_seg2cat.pkl",revision="main") - -models={"seg2cat":network_cat - } - -device='cuda' if torch.cuda.is_available() else 'cpu' -outdir="./" - -class CPU_Unpickler(pickle.Unpickler): - def find_class(self, module, name): - if module == 'torch.storage' and name == '_load_from_bytes': - return lambda b: torch.load(io.BytesIO(b), map_location='cpu') - return super().find_class(module, name) - -def load_network_pkl_cpu(f, force_fp16=False): - data = CPU_Unpickler(f).load() - - # Legacy TensorFlow pickle => convert. - if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data): - tf_G, tf_D, tf_Gs = data - G = convert_tf_generator(tf_G) - D = convert_tf_discriminator(tf_D) - G_ema = convert_tf_generator(tf_Gs) - data = dict(G=G, D=D, G_ema=G_ema) - - # Add missing fields. - if 'training_set_kwargs' not in data: - data['training_set_kwargs'] = None - if 'augment_pipe' not in data: - data['augment_pipe'] = None - - # Validate contents. - assert isinstance(data['G'], torch.nn.Module) - assert isinstance(data['D'], torch.nn.Module) - assert isinstance(data['G_ema'], torch.nn.Module) - assert isinstance(data['training_set_kwargs'], (dict, type(None))) - assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None))) - - # Force FP16. 
- if force_fp16: - for key in ['G', 'D', 'G_ema']: - old = data[key] - kwargs = copy.deepcopy(old.init_kwargs) - fp16_kwargs = kwargs.get('synthesis_kwargs', kwargs) - fp16_kwargs.num_fp16_res = 4 - fp16_kwargs.conv_clamp = 256 - if kwargs != old.init_kwargs: - new = type(old)(**kwargs).eval().requires_grad_(False) - misc.copy_params_and_buffers(old, new, require_all=True) - data[key] = new - return data - -color_list = [[255, 255, 255], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255], [204, 0, 204], [0, 255, 255], [255, 204, 204], [102, 51, 0], [255, 0, 0], [102, 204, 0], [255, 255, 0], [0, 0, 153], [0, 0, 204], [255, 51, 153], [0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]] - -def colormap2labelmap(color_img): - im_base = np.zeros((color_img.shape[0], color_img.shape[1])) - for idx, color in enumerate(color_list): - - k1=((color_img == np.asarray(color))[:,:,0])*1 - k2=((color_img == np.asarray(color))[:,:,1])*1 - k3=((color_img == np.asarray(color))[:,:,2])*1 - k=((k1*k2*k3)==1) - - im_base[k] = idx - return im_base - - -def checklabelmap(img): - labels=np.unique(img) - for idx,label in enumerate(labels): - img[img==label]=idx - return img - -def get_all(cfg,input,truncation_psi,mesh_resolution,random_seed,fps,num_frames): - - network=models[cfg] - - if device=="cpu": - with dnnlib.util.open_url(network) as f: - G = load_network_pkl_cpu(f)['G_ema'].eval().to(device) - else: - with dnnlib.util.open_url(network) as f: - G = legacy.load_network_pkl(f)['G_ema'].eval().to(device) - - if cfg == 'seg2cat' or cfg == 'seg2face': - neural_rendering_resolution = 128 - data_type = 'seg' - # Initialize pose sampler. - forward_cam2world_pose = LookAtPoseSampler.sample(3.14/2, 3.14/2, torch.tensor(G.rendering_kwargs['avg_camera_pivot'], device=device), - radius=G.rendering_kwargs['avg_camera_radius'], device=device) - focal_length = 4.2647 # shapenet has higher FOV - intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device) - forward_pose = torch.cat([forward_cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1) - elif cfg == 'edge2car': - neural_rendering_resolution = 64 - data_type= 'edge' - else: - print('Invalid cfg') - - save_dir = Path(outdir) - - - if isinstance(input,str): - input_label =np.asarray( PIL.Image.open(input)) - else: - input_label=np.asarray(input) - - input_label=colormap2labelmap(input_label) - input_label=checklabelmap(input_label) - input_label = np.asarray(input_label).astype(np.uint8) - input_label = torch.from_numpy(input_label).unsqueeze(0).unsqueeze(0).to(device) - input_pose = forward_pose.to(device) - - # Generate videos - z = torch.from_numpy(np.random.RandomState(int(random_seed)).randn(1, G.z_dim).astype('float32')).to(device) - - with torch.no_grad(): - ws = G.mapping(z, input_pose, {'mask': input_label, 'pose': input_pose}) - out = G.synthesis(ws, input_pose, noise_mode='const', neural_rendering_resolution=neural_rendering_resolution) - - image_color = ((out['image'][0].permute(1, 2, 0).cpu().numpy().clip(-1, 1) + 1) * 127.5).astype(np.uint8) - image_seg = color_mask(torch.argmax(out['semantic'][0], dim=0).cpu().numpy()).astype(np.uint8) - mesh_trimesh = trimesh.Trimesh(*extract_geometry(G, ws, resolution=mesh_resolution, threshold=50.)) - - verts_np = np.array(mesh_trimesh.vertices) - colors = torch.zeros((verts_np.shape[0], 3), device=device) - semantic_colors = torch.zeros((verts_np.shape[0], 6), device=device) - samples_color = torch.tensor(verts_np, device=device).unsqueeze(0).float() - - 
head = 0 - max_batch = 10000000 - with tqdm(total = verts_np.shape[0]) as pbar: - with torch.no_grad(): - while head < verts_np.shape[0]: - torch.manual_seed(0) - out = G.sample_mixed(samples_color[:, head:head+max_batch], None, ws, truncation_psi=truncation_psi, noise_mode='const') - # sigma = out['sigma'] - colors[head:head+max_batch, :] = out['rgb'][0,:,:3] - seg = out['rgb'][0, :, 32:32+6] - semantic_colors[head:head+max_batch, :] = seg - # semantics[:, head:head+max_batch] = out['semantic'] - head += max_batch - pbar.update(max_batch) - - semantic_colors = torch.tensor(color_list,device=device)[torch.argmax(semantic_colors, dim=-1)] - - mesh_trimesh.visual.vertex_colors = semantic_colors.cpu().numpy().astype(np.uint8) - frames, frames_label = render_video(G, ws, intrinsics, num_frames = num_frames, pitch_range = 0.25, yaw_range = 0.35, neural_rendering_resolution=neural_rendering_resolution, device=device) - - # Save the video - video=os.path.join(save_dir ,f'{cfg}_color.mp4') - video_label=os.path.join(save_dir,f'{cfg}_label.mp4') - imageio.mimsave(video, frames, fps=fps) - imageio.mimsave(video_label, frames_label, fps=fps), - fig_mesh=return_plot_go(mesh_trimesh) - return fig_mesh,image_color,image_seg,video,video_label - -title="3D-aware Conditional Image Synthesis" -desc=f''' - - [Arxiv: "3D-aware Conditional Image Synthesis".](https://arxiv.org/abs/2302.08509) - - [Project Page.](https://www.cs.cmu.edu/~pix2pix3D/) - - [For the official implementation.](https://github.com/dunbar12138/pix2pix3D) - - ### Future Work based on interest - - Adding new models for new type objects - - New Customization - - - It is running on {device} - The process can take long time.Especially ,To generate videos and the time of process depends the number of frames,Mesh Resolution and current compiler device. 
- -''' -demo_inputs=[ - gr.Dropdown(choices=["seg2cat"],label="Choose Model",value="seg2cat"), - gr.Image(type="filepath",shape=(512, 512),label="Mask"), - gr.Slider( minimum=0, maximum=2,label='Truncation PSI',value=1), - gr.Slider( minimum=32, maximum=512,label='Mesh Resolution',value=32), - gr.Slider( minimum=0, maximum=2**16,label='Seed',value=128), - gr.Slider( minimum=10, maximum=120,label='FPS',value=30), - gr.Slider( minimum=10, maximum=120,label='The Number of Frames',value=30), - -] -demo_outputs=[ - gr.Plot(label="Generated Mesh"), - gr.Image(type="pil",shape=(256,256),label="Generated Image"), - gr.Image(type="pil",shape=(256,256),label="Generated LabelMap"), - gr.Video(label="Generated Video ") , - gr.Video(label="Generated Label Video ") - -] -examples = [ - ["seg2cat", "img.png", 1, 32, 128, 30, 30], - ["seg2cat", "img2.png", 1, 32, 128, 30, 30], - ["seg2cat", "img3.png", 1, 32, 128, 30, 30], - -] - - -demo_app = gr.Interface( - fn=get_all, - inputs=demo_inputs, - outputs=demo_outputs, - cache_examples=True, - title=title, - theme="huggingface", - description=desc, - examples=examples, -) -demo_app.launch(debug=True, enable_queue=True) diff --git a/spaces/SkalskiP/MetaCLIP/app.py b/spaces/SkalskiP/MetaCLIP/app.py deleted file mode 100644 index 7c4d3fd948f0e4c213846e45ad160edfb2849501..0000000000000000000000000000000000000000 --- a/spaces/SkalskiP/MetaCLIP/app.py +++ /dev/null @@ -1,63 +0,0 @@ -from typing import List - -import gradio as gr -import numpy as np -import torch -from transformers import CLIPProcessor, CLIPModel - -IMAGENET_CLASSES_FILE = "imagenet-classes.txt" -EXAMPLES = ["dog.jpeg", "car.png"] - -MARKDOWN = """ -# Zero-Shot Image Classification with MetaCLIP - -This is the demo for a zero-shot image classification model based on -[MetaCLIP](https://github.com/facebookresearch/MetaCLIP), described in the paper -[Demystifying CLIP Data](https://arxiv.org/abs/2309.16671) that formalizes CLIP data -curation as a simple algorithm. 
-""" - - -def load_text_lines(file_path: str) -> List[str]: - with open(file_path, 'r') as file: - lines = file.readlines() - return [line.rstrip() for line in lines] - - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -model = CLIPModel.from_pretrained("facebook/metaclip-b32-400m").to(device) -processor = CLIPProcessor.from_pretrained("facebook/metaclip-b32-400m") -imagenet_classes = load_text_lines(IMAGENET_CLASSES_FILE) - - -def classify_image(input_image) -> str: - inputs = processor( - text=imagenet_classes, - images=input_image, - return_tensors="pt", - padding=True).to(device) - outputs = model(**inputs) - probs = outputs.logits_per_image.softmax(dim=1) - class_index = np.argmax(probs.detach().cpu().numpy()) - return imagenet_classes[class_index] - - -with gr.Blocks() as demo: - gr.Markdown(MARKDOWN) - with gr.Row(): - image = gr.Image(image_mode='RGB', type='pil') - output_text = gr.Textbox(label="Output") - submit_button = gr.Button("Submit") - - submit_button.click(classify_image, inputs=[image], outputs=output_text) - - gr.Examples( - examples=EXAMPLES, - fn=classify_image, - inputs=[image], - outputs=[output_text], - cache_examples=True, - run_on_click=True - ) - -demo.launch(debug=False) diff --git a/spaces/Sumit7864/Image-Enhancer/docs/ncnn_conversion.md b/spaces/Sumit7864/Image-Enhancer/docs/ncnn_conversion.md deleted file mode 100644 index e1785cd079ccbb6f0a5ddefe24f63bfe81ce9b21..0000000000000000000000000000000000000000 --- a/spaces/Sumit7864/Image-Enhancer/docs/ncnn_conversion.md +++ /dev/null @@ -1,11 +0,0 @@ -# Instructions on converting to NCNN models - -1. Convert to onnx model with `scripts/pytorch2onnx.py`. Remember to modify codes accordingly -1. Convert onnx model to ncnn model - 1. `cd ncnn-master\ncnn\build\tools\onnx` - 1. `onnx2ncnn.exe realesrgan-x4.onnx realesrgan-x4-raw.param realesrgan-x4-raw.bin` -1. Optimize ncnn model - 1. fp16 mode - 1. `cd ncnn-master\ncnn\build\tools` - 1. `ncnnoptimize.exe realesrgan-x4-raw.param realesrgan-x4-raw.bin realesrgan-x4.param realesrgan-x4.bin 1` -1. Modify the blob name in `realesrgan-x4.param`: `data` and `output` diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/pt_inputhooks/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/pt_inputhooks/__init__.py deleted file mode 100644 index 9043f15e86b7e9c73cef1387acd07100444e0e24..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/terminal/pt_inputhooks/__init__.py +++ /dev/null @@ -1,138 +0,0 @@ -import importlib -import os - -aliases = { - 'qt4': 'qt', - 'gtk2': 'gtk', -} - -backends = [ - "qt", - "qt5", - "qt6", - "gtk", - "gtk2", - "gtk3", - "gtk4", - "tk", - "wx", - "pyglet", - "glut", - "osx", - "asyncio", -] - -registered = {} - -def register(name, inputhook): - """Register the function *inputhook* as an event loop integration.""" - registered[name] = inputhook - - -class UnknownBackend(KeyError): - def __init__(self, name): - self.name = name - - def __str__(self): - return ("No event loop integration for {!r}. 
" - "Supported event loops are: {}").format(self.name, - ', '.join(backends + sorted(registered))) - - -def set_qt_api(gui): - """Sets the `QT_API` environment variable if it isn't already set.""" - - qt_api = os.environ.get("QT_API", None) - - from IPython.external.qt_loaders import ( - QT_API_PYQT, - QT_API_PYQT5, - QT_API_PYQT6, - QT_API_PYSIDE, - QT_API_PYSIDE2, - QT_API_PYSIDE6, - QT_API_PYQTv1, - loaded_api, - ) - - loaded = loaded_api() - - qt_env2gui = { - QT_API_PYSIDE: "qt4", - QT_API_PYQTv1: "qt4", - QT_API_PYQT: "qt4", - QT_API_PYSIDE2: "qt5", - QT_API_PYQT5: "qt5", - QT_API_PYSIDE6: "qt6", - QT_API_PYQT6: "qt6", - } - if loaded is not None and gui != "qt": - if qt_env2gui[loaded] != gui: - print( - f"Cannot switch Qt versions for this session; will use {qt_env2gui[loaded]}." - ) - return qt_env2gui[loaded] - - if qt_api is not None and gui != "qt": - if qt_env2gui[qt_api] != gui: - print( - f'Request for "{gui}" will be ignored because `QT_API` ' - f'environment variable is set to "{qt_api}"' - ) - return qt_env2gui[qt_api] - else: - if gui == "qt5": - try: - import PyQt5 # noqa - - os.environ["QT_API"] = "pyqt5" - except ImportError: - try: - import PySide2 # noqa - - os.environ["QT_API"] = "pyside2" - except ImportError: - os.environ["QT_API"] = "pyqt5" - elif gui == "qt6": - try: - import PyQt6 # noqa - - os.environ["QT_API"] = "pyqt6" - except ImportError: - try: - import PySide6 # noqa - - os.environ["QT_API"] = "pyside6" - except ImportError: - os.environ["QT_API"] = "pyqt6" - elif gui == "qt": - # Don't set QT_API; let IPython logic choose the version. - if "QT_API" in os.environ.keys(): - del os.environ["QT_API"] - else: - print(f'Unrecognized Qt version: {gui}. Should be "qt5", "qt6", or "qt".') - return - - # Import it now so we can figure out which version it is. - from IPython.external.qt_for_kernel import QT_API - - return qt_env2gui[QT_API] - - -def get_inputhook_name_and_func(gui): - if gui in registered: - return gui, registered[gui] - - if gui not in backends: - raise UnknownBackend(gui) - - if gui in aliases: - return get_inputhook_name_and_func(aliases[gui]) - - gui_mod = gui - if gui.startswith("qt"): - gui = set_qt_api(gui) - gui_mod = "qt" - - mod = importlib.import_module("IPython.terminal.pt_inputhooks." + gui_mod) - return gui, mod.inputhook diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageFilter.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageFilter.py deleted file mode 100644 index 63d6dcf5cec1576148e0cd972081c6efdccf8e71..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageFilter.py +++ /dev/null @@ -1,549 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# standard filters -# -# History: -# 1995-11-27 fl Created -# 2002-06-08 fl Added rank and mode filters -# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call -# -# Copyright (c) 1997-2003 by Secret Labs AB. -# Copyright (c) 1995-2002 by Fredrik Lundh. -# -# See the README file for information on usage and redistribution. -# -import functools - - -class Filter: - pass - - -class MultibandFilter(Filter): - pass - - -class BuiltinFilter(MultibandFilter): - def filter(self, image): - if image.mode == "P": - msg = "cannot filter palette images" - raise ValueError(msg) - return image.filter(*self.filterargs) - - -class Kernel(BuiltinFilter): - """ - Create a convolution kernel. The current version only - supports 3x3 and 5x5 integer and floating point kernels. 
- - In the current version, kernels can only be applied to - "L" and "RGB" images. - - :param size: Kernel size, given as (width, height). In the current - version, this must be (3,3) or (5,5). - :param kernel: A sequence containing kernel weights. - :param scale: Scale factor. If given, the result for each pixel is - divided by this value. The default is the sum of the - kernel weights. - :param offset: Offset. If given, this value is added to the result, - after it has been divided by the scale factor. - """ - - name = "Kernel" - - def __init__(self, size, kernel, scale=None, offset=0): - if scale is None: - # default scale is sum of kernel - scale = functools.reduce(lambda a, b: a + b, kernel) - if size[0] * size[1] != len(kernel): - msg = "not enough coefficients in kernel" - raise ValueError(msg) - self.filterargs = size, scale, offset, kernel - - -class RankFilter(Filter): - """ - Create a rank filter. The rank filter sorts all pixels in - a window of the given size, and returns the ``rank``'th value. - - :param size: The kernel size, in pixels. - :param rank: What pixel value to pick. Use 0 for a min filter, - ``size * size / 2`` for a median filter, ``size * size - 1`` - for a max filter, etc. - """ - - name = "Rank" - - def __init__(self, size, rank): - self.size = size - self.rank = rank - - def filter(self, image): - if image.mode == "P": - msg = "cannot filter palette images" - raise ValueError(msg) - image = image.expand(self.size // 2, self.size // 2) - return image.rankfilter(self.size, self.rank) - - -class MedianFilter(RankFilter): - """ - Create a median filter. Picks the median pixel value in a window with the - given size. - - :param size: The kernel size, in pixels. - """ - - name = "Median" - - def __init__(self, size=3): - self.size = size - self.rank = size * size // 2 - - -class MinFilter(RankFilter): - """ - Create a min filter. Picks the lowest pixel value in a window with the - given size. - - :param size: The kernel size, in pixels. - """ - - name = "Min" - - def __init__(self, size=3): - self.size = size - self.rank = 0 - - -class MaxFilter(RankFilter): - """ - Create a max filter. Picks the largest pixel value in a window with the - given size. - - :param size: The kernel size, in pixels. - """ - - name = "Max" - - def __init__(self, size=3): - self.size = size - self.rank = size * size - 1 - - -class ModeFilter(Filter): - """ - Create a mode filter. Picks the most frequent pixel value in a box with the - given size. Pixel values that occur only once or twice are ignored; if no - pixel value occurs more than twice, the original pixel value is preserved. - - :param size: The kernel size, in pixels. - """ - - name = "Mode" - - def __init__(self, size=3): - self.size = size - - def filter(self, image): - return image.modefilter(self.size) - - -class GaussianBlur(MultibandFilter): - """Blurs the image with a sequence of extended box filters, which - approximates a Gaussian kernel. For details on accuracy see - - - :param radius: Standard deviation of the Gaussian kernel. - """ - - name = "GaussianBlur" - - def __init__(self, radius=2): - self.radius = radius - - def filter(self, image): - return image.gaussian_blur(self.radius) - - -class BoxBlur(MultibandFilter): - """Blurs the image by setting each pixel to the average value of the pixels - in a square box extending radius pixels in each direction. - Supports float radius of arbitrary size. Uses an optimized implementation - which runs in linear time relative to the size of the image - for any radius value. 
- - :param radius: Size of the box in one direction. Radius 0 does not blur, - returns an identical image. Radius 1 takes 1 pixel - in each direction, i.e. 9 pixels in total. - """ - - name = "BoxBlur" - - def __init__(self, radius): - if radius < 0: - msg = "radius must be >= 0" - raise ValueError(msg) - self.radius = radius - - def filter(self, image): - return image.box_blur(self.radius) - - -class UnsharpMask(MultibandFilter): - """Unsharp mask filter. - - See Wikipedia's entry on `digital unsharp masking`_ for an explanation of - the parameters. - - :param radius: Blur Radius - :param percent: Unsharp strength, in percent - :param threshold: Threshold controls the minimum brightness change that - will be sharpened - - .. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking - - """ # noqa: E501 - - name = "UnsharpMask" - - def __init__(self, radius=2, percent=150, threshold=3): - self.radius = radius - self.percent = percent - self.threshold = threshold - - def filter(self, image): - return image.unsharp_mask(self.radius, self.percent, self.threshold) - - -class BLUR(BuiltinFilter): - name = "Blur" - # fmt: off - filterargs = (5, 5), 16, 0, ( - 1, 1, 1, 1, 1, - 1, 0, 0, 0, 1, - 1, 0, 0, 0, 1, - 1, 0, 0, 0, 1, - 1, 1, 1, 1, 1, - ) - # fmt: on - - -class CONTOUR(BuiltinFilter): - name = "Contour" - # fmt: off - filterargs = (3, 3), 1, 255, ( - -1, -1, -1, - -1, 8, -1, - -1, -1, -1, - ) - # fmt: on - - -class DETAIL(BuiltinFilter): - name = "Detail" - # fmt: off - filterargs = (3, 3), 6, 0, ( - 0, -1, 0, - -1, 10, -1, - 0, -1, 0, - ) - # fmt: on - - -class EDGE_ENHANCE(BuiltinFilter): - name = "Edge-enhance" - # fmt: off - filterargs = (3, 3), 2, 0, ( - -1, -1, -1, - -1, 10, -1, - -1, -1, -1, - ) - # fmt: on - - -class EDGE_ENHANCE_MORE(BuiltinFilter): - name = "Edge-enhance More" - # fmt: off - filterargs = (3, 3), 1, 0, ( - -1, -1, -1, - -1, 9, -1, - -1, -1, -1, - ) - # fmt: on - - -class EMBOSS(BuiltinFilter): - name = "Emboss" - # fmt: off - filterargs = (3, 3), 1, 128, ( - -1, 0, 0, - 0, 1, 0, - 0, 0, 0, - ) - # fmt: on - - -class FIND_EDGES(BuiltinFilter): - name = "Find Edges" - # fmt: off - filterargs = (3, 3), 1, 0, ( - -1, -1, -1, - -1, 8, -1, - -1, -1, -1, - ) - # fmt: on - - -class SHARPEN(BuiltinFilter): - name = "Sharpen" - # fmt: off - filterargs = (3, 3), 16, 0, ( - -2, -2, -2, - -2, 32, -2, - -2, -2, -2, - ) - # fmt: on - - -class SMOOTH(BuiltinFilter): - name = "Smooth" - # fmt: off - filterargs = (3, 3), 13, 0, ( - 1, 1, 1, - 1, 5, 1, - 1, 1, 1, - ) - # fmt: on - - -class SMOOTH_MORE(BuiltinFilter): - name = "Smooth More" - # fmt: off - filterargs = (5, 5), 100, 0, ( - 1, 1, 1, 1, 1, - 1, 5, 5, 5, 1, - 1, 5, 44, 5, 1, - 1, 5, 5, 5, 1, - 1, 1, 1, 1, 1, - ) - # fmt: on - - -class Color3DLUT(MultibandFilter): - """Three-dimensional color lookup table. - - Transforms 3-channel pixels using the values of the channels as coordinates - in the 3D lookup table and interpolating the nearest elements. - - This method allows you to apply almost any color transformation - in constant time by using pre-calculated decimated tables. - - .. versionadded:: 5.2.0 - - :param size: Size of the table. One int or tuple of (int, int, int). - Minimal size in any dimension is 2, maximum is 65. - :param table: Flat lookup table. A list of ``channels * size**3`` - float elements or a list of ``size**3`` channels-sized - tuples with floats. Channels are changed first, - then first dimension, then second, then third. 
- Value 0.0 corresponds lowest value of output, 1.0 highest. - :param channels: Number of channels in the table. Could be 3 or 4. - Default is 3. - :param target_mode: A mode for the result image. Should have not less - than ``channels`` channels. Default is ``None``, - which means that mode wouldn't be changed. - """ - - name = "Color 3D LUT" - - def __init__(self, size, table, channels=3, target_mode=None, **kwargs): - if channels not in (3, 4): - msg = "Only 3 or 4 output channels are supported" - raise ValueError(msg) - self.size = size = self._check_size(size) - self.channels = channels - self.mode = target_mode - - # Hidden flag `_copy_table=False` could be used to avoid extra copying - # of the table if the table is specially made for the constructor. - copy_table = kwargs.get("_copy_table", True) - items = size[0] * size[1] * size[2] - wrong_size = False - - numpy = None - if hasattr(table, "shape"): - try: - import numpy - except ImportError: # pragma: no cover - pass - - if numpy and isinstance(table, numpy.ndarray): - if copy_table: - table = table.copy() - - if table.shape in [ - (items * channels,), - (items, channels), - (size[2], size[1], size[0], channels), - ]: - table = table.reshape(items * channels) - else: - wrong_size = True - - else: - if copy_table: - table = list(table) - - # Convert to a flat list - if table and isinstance(table[0], (list, tuple)): - table, raw_table = [], table - for pixel in raw_table: - if len(pixel) != channels: - msg = ( - "The elements of the table should " - f"have a length of {channels}." - ) - raise ValueError(msg) - table.extend(pixel) - - if wrong_size or len(table) != items * channels: - msg = ( - "The table should have either channels * size**3 float items " - "or size**3 items of channels-sized tuples with floats. " - f"Table should be: {channels}x{size[0]}x{size[1]}x{size[2]}. " - f"Actual length: {len(table)}" - ) - raise ValueError(msg) - self.table = table - - @staticmethod - def _check_size(size): - try: - _, _, _ = size - except ValueError as e: - msg = "Size should be either an integer or a tuple of three integers." - raise ValueError(msg) from e - except TypeError: - size = (size, size, size) - size = [int(x) for x in size] - for size_1d in size: - if not 2 <= size_1d <= 65: - msg = "Size should be in [2, 65] range." - raise ValueError(msg) - return size - - @classmethod - def generate(cls, size, callback, channels=3, target_mode=None): - """Generates new LUT using provided callback. - - :param size: Size of the table. Passed to the constructor. - :param callback: Function with three parameters which correspond - three color channels. Will be called ``size**3`` - times with values from 0.0 to 1.0 and should return - a tuple with ``channels`` elements. - :param channels: The number of channels which should return callback. - :param target_mode: Passed to the constructor of the resulting - lookup table. 
- """ - size_1d, size_2d, size_3d = cls._check_size(size) - if channels not in (3, 4): - msg = "Only 3 or 4 output channels are supported" - raise ValueError(msg) - - table = [0] * (size_1d * size_2d * size_3d * channels) - idx_out = 0 - for b in range(size_3d): - for g in range(size_2d): - for r in range(size_1d): - table[idx_out : idx_out + channels] = callback( - r / (size_1d - 1), g / (size_2d - 1), b / (size_3d - 1) - ) - idx_out += channels - - return cls( - (size_1d, size_2d, size_3d), - table, - channels=channels, - target_mode=target_mode, - _copy_table=False, - ) - - def transform(self, callback, with_normals=False, channels=None, target_mode=None): - """Transforms the table values using provided callback and returns - a new LUT with altered values. - - :param callback: A function which takes old lookup table values - and returns a new set of values. The number - of arguments which function should take is - ``self.channels`` or ``3 + self.channels`` - if ``with_normals`` flag is set. - Should return a tuple of ``self.channels`` or - ``channels`` elements if it is set. - :param with_normals: If true, ``callback`` will be called with - coordinates in the color cube as the first - three arguments. Otherwise, ``callback`` - will be called only with actual color values. - :param channels: The number of channels in the resulting lookup table. - :param target_mode: Passed to the constructor of the resulting - lookup table. - """ - if channels not in (None, 3, 4): - msg = "Only 3 or 4 output channels are supported" - raise ValueError(msg) - ch_in = self.channels - ch_out = channels or ch_in - size_1d, size_2d, size_3d = self.size - - table = [0] * (size_1d * size_2d * size_3d * ch_out) - idx_in = 0 - idx_out = 0 - for b in range(size_3d): - for g in range(size_2d): - for r in range(size_1d): - values = self.table[idx_in : idx_in + ch_in] - if with_normals: - values = callback( - r / (size_1d - 1), - g / (size_2d - 1), - b / (size_3d - 1), - *values, - ) - else: - values = callback(*values) - table[idx_out : idx_out + ch_out] = values - idx_in += ch_in - idx_out += ch_out - - return type(self)( - self.size, - table, - channels=ch_out, - target_mode=target_mode or self.mode, - _copy_table=False, - ) - - def __repr__(self): - r = [ - f"{self.__class__.__name__} from {self.table.__class__.__name__}", - "size={:d}x{:d}x{:d}".format(*self.size), - f"channels={self.channels:d}", - ] - if self.mode: - r.append(f"target_mode={self.mode}") - return "<{}>".format(" ".join(r)) - - def filter(self, image): - from . import Image - - return image.color_lut_3d( - self.mode or image.mode, - Image.Resampling.BILINEAR, - self.channels, - self.size[0], - self.size[1], - self.size[2], - self.table, - ) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/McIdasImagePlugin.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/McIdasImagePlugin.py deleted file mode 100644 index 17c008b9a6a1218f6e51add4fda83acb92ee06ce..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/McIdasImagePlugin.py +++ /dev/null @@ -1,75 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# Basic McIdas support for PIL -# -# History: -# 1997-05-05 fl Created (8-bit images only) -# 2009-03-08 fl Added 16/32-bit support. -# -# Thanks to Richard Jones and Craig Swank for specs and samples. -# -# Copyright (c) Secret Labs AB 1997. -# Copyright (c) Fredrik Lundh 1997. -# -# See the README file for information on usage and redistribution. 
-# - -import struct - -from . import Image, ImageFile - - -def _accept(s): - return s[:8] == b"\x00\x00\x00\x00\x00\x00\x00\x04" - - -## -# Image plugin for McIdas area images. - - -class McIdasImageFile(ImageFile.ImageFile): - format = "MCIDAS" - format_description = "McIdas area file" - - def _open(self): - # parse area file directory - s = self.fp.read(256) - if not _accept(s) or len(s) != 256: - msg = "not an McIdas area file" - raise SyntaxError(msg) - - self.area_descriptor_raw = s - self.area_descriptor = w = [0] + list(struct.unpack("!64i", s)) - - # get mode - if w[11] == 1: - mode = rawmode = "L" - elif w[11] == 2: - # FIXME: add memory map support - mode = "I" - rawmode = "I;16B" - elif w[11] == 4: - # FIXME: add memory map support - mode = "I" - rawmode = "I;32B" - else: - msg = "unsupported McIdas format" - raise SyntaxError(msg) - - self.mode = mode - self._size = w[10], w[9] - - offset = w[34] + w[15] - stride = w[15] + w[10] * w[11] * w[14] - - self.tile = [("raw", (0, 0) + self.size, offset, (rawmode, stride, 1))] - - -# -------------------------------------------------------------------- -# registry - -Image.register_open(McIdasImageFile.format, McIdasImageFile, _accept) - -# no default extension diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/cc_sqlalchemy/ddl/tableengine.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/cc_sqlalchemy/ddl/tableengine.py deleted file mode 100644 index 598e2e5adb0227ed29049e90add6a4b606c03c54..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/clickhouse_connect/cc_sqlalchemy/ddl/tableengine.py +++ /dev/null @@ -1,247 +0,0 @@ -import logging -from typing import Type, Sequence, Optional, Dict - -from sqlalchemy.exc import ArgumentError, SQLAlchemyError -from sqlalchemy.sql.base import SchemaEventTarget -from sqlalchemy.sql.visitors import Visitable - -logger = logging.getLogger(__name__) - -engine_map: Dict[str, Type['TableEngine']] = {} - - -def tuple_expr(expr_name, value): - """ - Create a table parameter with a tuple or list correctly formatted - :param expr_name: parameter - :param value: string or tuple of strings to format - :return: formatted parameter string - """ - if value is None: - return '' - v = f'{expr_name.strip()}' - if isinstance(value, (tuple, list)): - return f" {v} ({','.join(value)})" - return f'{v} {value}' - - -class TableEngine(SchemaEventTarget, Visitable): - """ - SqlAlchemy Schema element to support ClickHouse table engines. 
At the moment provides no real - functionality other than the CREATE TABLE argument string - """ - arg_names = () - quoted_args = set() - optional_args = set() - eng_params = () - - def __init_subclass__(cls, **kwargs): - engine_map[cls.__name__] = cls - - def __init__(self, kwargs): - # pylint: disable=no-value-for-parameter - Visitable.__init__(self) - self.name = self.__class__.__name__ - te_name = f'{self.name} Table Engine' - engine_args = [] - for arg_name in self.arg_names: - v = kwargs.pop(arg_name, None) - if v is None: - if arg_name in self.optional_args: - continue - raise ValueError(f'Required engine parameter {arg_name} not provided for {te_name}') - if arg_name in self.quoted_args: - engine_args.append(f"'{v}'") - else: - engine_args.append(v) - if engine_args: - self.arg_str = f'({", ".join(engine_args)})' - params = [] - for param_name in self.eng_params: - v = kwargs.pop(param_name, None) - if v is not None: - params.append(tuple_expr(param_name.upper().replace('_', ' '), v)) - - self.full_engine = 'Engine ' + self.name - if engine_args: - self.full_engine += f'({", ".join(engine_args)})' - if params: - self.full_engine += ' ' + ' '.join(params) - - def compile(self): - return self.full_engine - - def check_primary_keys(self, primary_keys: Sequence): - raise SQLAlchemyError(f'Table Engine {self.name} does not support primary keys') - - def _set_parent(self, parent, **_kwargs): - parent.engine = self - - -class Memory(TableEngine): - pass - - -class Log(TableEngine): - pass - - -class StripeLog(TableEngine): - pass - - -class TinyLog(TableEngine): - pass - - -class Null(TableEngine): - pass - - -class Set(TableEngine): - pass - - -class Dictionary(TableEngine): - arg_names = ['dictionary'] - - # pylint: disable=unused-argument - def __init__(self, dictionary: str = None): - super().__init__(locals()) - - -class Merge(TableEngine): - arg_names = ['db_name, tables_regexp'] - - # pylint: disable=unused-argument - def __init__(self, db_name: str = None, tables_regexp: str = None): - super().__init__(locals()) - - -class File(TableEngine): - arg_names = ['fmt'] - - # pylint: disable=unused-argument - def __init__(self, fmt: str = None): - super().__init__(locals()) - - -class Distributed(TableEngine): - arg_names = ['cluster', 'database', 'table', 'sharding_key', 'policy_name'] - optional_args = {'sharding_key', 'policy_name'} - - # pylint: disable=unused-argument - def __init__(self, cluster: str = None, database: str = None, table=None, - sharding_key: str = None, policy_name: str = None): - super().__init__(locals()) - - -class MergeTree(TableEngine): - eng_params = ['order_by', 'partition_key', 'primary_key', 'sample_by'] - - # pylint: disable=unused-argument - def __init__(self, order_by: str = None, primary_key: str = None, - partition_by: str = None, sample_by: str = None): - if not order_by and not primary_key: - raise ArgumentError(None, 'Either PRIMARY KEY or ORDER BY must be specified') - super().__init__(locals()) - - -class SummingMergeTree(MergeTree): - pass - - -class AggregatingMergeTree(MergeTree): - pass - - -class ReplacingMergeTree(TableEngine): - arg_names = ['ver'] - optional_args = set(arg_names) - eng_params = MergeTree.eng_params - - # pylint: disable=unused-argument - def __init__(self, ver: str = None, order_by: str = None, primary_key: str = None, - partition_by: str = None, sample_by: str = None): - if not order_by and not primary_key: - raise ArgumentError(None, 'Either PRIMARY KEY or ORDER BY must be specified') - super().__init__(locals()) - - 
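To make the DDL element above concrete, here is a small sketch based only on the classes shown in this file; the printed strings follow the string-building logic in `TableEngine.__init__()` and `compile()`, while wiring an engine into an actual SQLAlchemy `Table` is outside this diff and not shown.

```python
from clickhouse_connect.cc_sqlalchemy.ddl.tableengine import MergeTree, build_engine

# Forward direction: a MergeTree with an ORDER BY expression renders the ENGINE
# clause that gets appended to CREATE TABLE.
engine = MergeTree(order_by='timestamp')
print(engine.compile())      # Engine MergeTree ORDER BY timestamp

# Reverse direction: parse the engine expression reported by ClickHouse back into
# a TableEngine instance; unknown engine names log a warning and return None.
reflected = build_engine('ReplacingMergeTree(ver) ORDER BY id')
print(reflected.name)        # ReplacingMergeTree
```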
-class CollapsingMergeTree(TableEngine): - arg_names = ['sign'] - eng_params = MergeTree.eng_params - - # pylint: disable=unused-argument - def __init__(self, sign: str = None, order_by: str = None, primary_key: str = None, - partition_by: str = None, sample_by: str = None): - if not order_by and not primary_key: - raise ArgumentError(None, 'Either PRIMARY KEY or ORDER BY must be specified') - super().__init__(locals()) - - -class VersionedCollapsingMergeTree(TableEngine): - arg_names = ['sign', 'version'] - eng_params = MergeTree.eng_params - - # pylint: disable=unused-argument - def __init__(self, sign: str = None, version: str = None, order_by: str = None, primary_key: str = None, - partition_by: str = None, sample_by: str = None): - if not order_by and not primary_key: - raise ArgumentError(None, 'Either PRIMARY KEY or ORDER BY must be specified') - super().__init__(locals()) - - -class GraphiteMergeTree(TableEngine): - arg_names = ['config_section'] - eng_params = MergeTree.eng_params - - # pylint: disable=unused-argument - def __init__(self, config_section: str = None, version: str = None, order_by: str = None, primary_key: str = None, - partition_by: str = None, sample_by: str = None): - if not order_by and not primary_key: - raise ArgumentError(None, 'Either PRIMARY KEY or ORDER BY must be specified') - super().__init__(locals()) - - -class ReplicatedMergeTree(TableEngine): - arg_names = ['zk_path', 'replica'] - quoted_args = set(arg_names) - optional_args = quoted_args - eng_params = MergeTree.eng_params - - # pylint: disable=unused-argument - def __init__(self, order_by: str = None, primary_key: str = None, partition_by: str = None, sample_by: str = None, - zk_path: str = None, replica: str = None): - if not order_by and not primary_key: - raise ArgumentError(None, 'Either PRIMARY KEY or ORDER BY must be specified') - super().__init__(locals()) - - -class ReplicatedAggregatingMergeTree(ReplicatedMergeTree): - pass - - -class ReplicatedSummingMergeTree(ReplicatedMergeTree): - pass - - -def build_engine(full_engine: str) -> Optional[TableEngine]: - """ - Factory function to create TableEngine class from ClickHouse full_engine expression - :param full_engine - :return: TableEngine DDL element - """ - if not full_engine: - return None - name = full_engine.split(' ')[0].split('(')[0] - try: - engine_cls = engine_map[name] - except KeyError: - if not name.startswith('System'): - logger.warning('Engine %s not found', name) - return None - engine = engine_cls.__new__(engine_cls) - engine.name = name - engine.full_engine = full_engine - return engine diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_dont_trace.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_dont_trace.py deleted file mode 100644 index 684af672b240987a0fa0444698f1a33688bcc271..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_dont_trace.py +++ /dev/null @@ -1,123 +0,0 @@ -''' -Support for a tag that allows skipping over functions while debugging. -''' -import linecache -import re - -# To suppress tracing a method, add the tag @DontTrace -# to a comment either preceding or on the same line as -# the method definition -# -# E.g.: -# #@DontTrace -# def test1(): -# pass -# -# ... or ... 
-# -# def test2(): #@DontTrace -# pass -DONT_TRACE_TAG = '@DontTrace' - -# Regular expression to match a decorator (at the beginning -# of a line). -RE_DECORATOR = re.compile(r'^\s*@') - -# Mapping from code object to bool. -# If the key exists, the value is the cached result of should_trace_hook -_filename_to_ignored_lines = {} - - -def default_should_trace_hook(frame, absolute_filename): - ''' - Return True if this frame should be traced, False if tracing should be blocked. - ''' - # First, check whether this code object has a cached value - ignored_lines = _filename_to_ignored_lines.get(absolute_filename) - if ignored_lines is None: - # Now, look up that line of code and check for a @DontTrace - # preceding or on the same line as the method. - # E.g.: - # #@DontTrace - # def test(): - # pass - # ... or ... - # def test(): #@DontTrace - # pass - ignored_lines = {} - lines = linecache.getlines(absolute_filename) - for i_line, line in enumerate(lines): - j = line.find('#') - if j >= 0: - comment = line[j:] - if DONT_TRACE_TAG in comment: - ignored_lines[i_line] = 1 - - # Note: when it's found in the comment, mark it up and down for the decorator lines found. - k = i_line - 1 - while k >= 0: - if RE_DECORATOR.match(lines[k]): - ignored_lines[k] = 1 - k -= 1 - else: - break - - k = i_line + 1 - while k <= len(lines): - if RE_DECORATOR.match(lines[k]): - ignored_lines[k] = 1 - k += 1 - else: - break - - _filename_to_ignored_lines[absolute_filename] = ignored_lines - - func_line = frame.f_code.co_firstlineno - 1 # co_firstlineno is 1-based, so -1 is needed - return not ( - func_line - 1 in ignored_lines or # -1 to get line before method - func_line in ignored_lines) # method line - - -should_trace_hook = None - - -def clear_trace_filter_cache(): - ''' - Clear the trace filter cache. - Call this after reloading. - ''' - global should_trace_hook - try: - # Need to temporarily disable a hook because otherwise - # _filename_to_ignored_lines.clear() will never complete. - old_hook = should_trace_hook - should_trace_hook = None - - # Clear the linecache - linecache.clearcache() - _filename_to_ignored_lines.clear() - - finally: - should_trace_hook = old_hook - - -def trace_filter(mode): - ''' - Set the trace filter mode. - - mode: Whether to enable the trace hook. - True: Trace filtering on (skipping methods tagged @DontTrace) - False: Trace filtering off (trace methods tagged @DontTrace) - None/default: Toggle trace filtering. 
- ''' - global should_trace_hook - if mode is None: - mode = should_trace_hook is None - - if mode: - should_trace_hook = default_should_trace_hook - else: - should_trace_hook = None - - return mode - diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydev_ipython/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/pydev_ipython/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/tensor/tensorflow_tensor.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/tensor/tensorflow_tensor.py deleted file mode 100644 index 5b9d53a76abc0776e820971efcb10600c841ce8b..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/tensor/tensorflow_tensor.py +++ /dev/null @@ -1,322 +0,0 @@ -from typing import TYPE_CHECKING, Any, Generic, Type, TypeVar, Union, cast - -import numpy as np - -from docarray.base_doc.base_node import BaseNode -from docarray.typing.proto_register import _register_proto -from docarray.typing.tensor.abstract_tensor import AbstractTensor -from docarray.utils._internal.misc import import_library - -if TYPE_CHECKING: - import tensorflow as tf # type: ignore - from pydantic import BaseConfig - from pydantic.fields import ModelField - - from docarray.computation.tensorflow_backend import TensorFlowCompBackend - from docarray.proto import NdArrayProto -else: - tf = import_library('tensorflow', raise_error=True) - - -T = TypeVar('T', bound='TensorFlowTensor') -ShapeT = TypeVar('ShapeT') - -tf_base: type = type(tf.Tensor) -node_base: type = type(BaseNode) - - -# the mypy error suppression below should not be necessary anymore once the following -# is released in mypy: https://github.com/python/mypy/pull/14135 -class metaTensorFlow( - AbstractTensor.__parametrized_meta__, # type: ignore - node_base, # type: ignore - tf_base, # type: ignore -): # type: ignore - pass - - -@_register_proto(proto_type_name='tensorflow_tensor') -class TensorFlowTensor(AbstractTensor, Generic[ShapeT], metaclass=metaTensorFlow): - """ - TensorFlowTensor class with a `.tensor` attribute of type `tf.Tensor`, - intended for use in a Document. - - This enables (de)serialization from/to protobuf and json, data validation, - and coersion from compatible types like numpy.ndarray. - - This type can also be used in a parametrized way, specifying the shape of the - tensor. - - In comparison to [`TorchTensor`][docarray.typing.TorchTensor] and - [`NdArray`][docarray.typing.tensor.ndarray.NdArray], - [`TensorFlowTensor`][docarray.typing.tensor.tensorflow_tensor.TensorFlowTensor] - is not a subclass of `tf.Tensor` (or `torch.Tensor`, `np.ndarray` respectively). - Instead, the `tf.Tensor` is stored in - [`TensorFlowTensor.tensor`][docarray.typing.tensor.tensorflow_tensor.TensorFlowTensor]. - Therefore, to do operations on the actual tensor data you have to always access the - [`TensorFlowTensor.tensor`][docarray.typing.tensor.tensorflow_tensor.TensorFlowTensor] - attribute. 
- - --- - - ```python - import tensorflow as tf - from docarray.typing import TensorFlowTensor - - - t = TensorFlowTensor(tensor=tf.zeros((224, 224))) - - # tensorflow functions - broadcasted = tf.broadcast_to(t.tensor, (3, 224, 224)) - broadcasted = tf.broadcast_to(t.unwrap(), (3, 224, 224)) - - # this will fail: - # broadcasted = tf.broadcast_to(t, (3, 224, 224)) - - # tensorflow.Tensor methods: - arr = t.tensor.numpy() - arr = t.unwrap().numpy() - - # this will fail: - # arr = t.numpy() - ``` - - --- - - The [`TensorFlowBackend`] however, operates on our - [`TensorFlowTensor`][docarray.typing.TensorFlowTensor] instances. - Here, you do not have to access the `.tensor` attribute, - but can instead just hand over your - [`TensorFlowTensor`][docarray.typing.TensorFlowTensor] instance. - - --- - - ```python - import tensorflow as tf - from docarray.typing import TensorFlowTensor - - - zeros = TensorFlowTensor(tensor=tf.zeros((3, 224, 224))) - - comp_be = zeros.get_comp_backend() - reshaped = comp_be.reshape(zeros, (224, 224, 3)) - assert comp_be.shape(reshaped) == (224, 224, 3) - ``` - - --- - - You can use [`TensorFlowTensor`][docarray.typing.TensorFlowTensor] in a Document as follows: - - --- - - ```python - from docarray import BaseDoc - from docarray.typing import TensorFlowTensor - import tensorflow as tf - - - class MyDoc(BaseDoc): - tensor: TensorFlowTensor - image_tensor: TensorFlowTensor[3, 224, 224] - square_crop: TensorFlowTensor[3, 'x', 'x'] - random_image: TensorFlowTensor[ - 3, ... - ] # first dimension is fixed, can have arbitrary shape - - - # create a document with tensors - doc = MyDoc( - tensor=tf.zeros((128,)), - image_tensor=tf.zeros((3, 224, 224)), - square_crop=tf.zeros((3, 64, 64)), - random_image=tf.zeros((3, 128, 256)), - ) - - # automatic shape conversion - doc = MyDoc( - tensor=tf.zeros((128,)), - image_tensor=tf.zeros((224, 224, 3)), # will reshape to (3, 224, 224) - square_crop=tf.zeros((3, 128, 128)), - random_image=tf.zeros((3, 64, 128)), - ) - - # !! The following will raise an error due to shape mismatch !! 
- from pydantic import ValidationError - - try: - doc = MyDoc( - tensor=tf.zeros((128,)), - image_tensor=tf.zeros((224, 224)), # this will fail validation - square_crop=tf.zeros((3, 128, 64)), # this will also fail validation - random_image=tf.zeros(4, 64, 128), # this will also fail validation - ) - except ValidationError as e: - pass - ``` - - --- - """ - - __parametrized_meta__ = metaTensorFlow - - def __init__(self, tensor: tf.Tensor): - super().__init__() - self.tensor = tensor - - def __getitem__(self, item): - from docarray.computation.tensorflow_backend import TensorFlowCompBackend - - tensor = self.unwrap() - if tensor is not None: - tensor = tensor[item] - return TensorFlowCompBackend._cast_output(t=tensor) - - def __setitem__(self, index, value): - """Set a slice of this tensor's `tf.Tensor`""" - t = self.unwrap() - value = tf.cast(value, dtype=t.dtype) - var = tf.Variable(t) - var[index].assign(value) - self.tensor = tf.constant(var) - - def __iter__(self): - """Iterate over the elements of this tensor's `tf.Tensor`.""" - for i in range(len(self)): - yield self[i] - - @classmethod - def __get_validators__(cls): - # one or more validators may be yielded which will be called in the - # order to validate the input, each validator will receive as an input - # the value returned from the previous validator - yield cls.validate - - @classmethod - def validate( - cls: Type[T], - value: Union[T, np.ndarray, Any], - field: 'ModelField', - config: 'BaseConfig', - ) -> T: - if isinstance(value, TensorFlowTensor): - return cast(T, value) - elif isinstance(value, tf.Tensor): - return cls._docarray_from_native(value) - else: - try: - arr: tf.Tensor = tf.constant(value) - return cls(tensor=arr) - except Exception: - pass # handled below - raise ValueError( - f'Expected a tensorflow.Tensor compatible type, got {type(value)}' - ) - - @classmethod - def _docarray_from_native(cls: Type[T], value: Union[tf.Tensor, T]) -> T: - """ - Create a `TensorFlowTensor` from a `tf.Tensor` or `TensorFlowTensor` - instance. - - :param value: instance of `tf.Tensor` or `TensorFlowTensor` - :return: a `TensorFlowTensor` - """ - if isinstance(value, TensorFlowTensor): - if cls.__unparametrizedcls__: # None if the tensor is parametrized - value.__class__ = cls.__unparametrizedcls__ # type: ignore - else: - value.__class__ = cls - return cast(T, value) - else: - if cls.__unparametrizedcls__: # None if the tensor is parametrized - cls_param_ = cls.__unparametrizedcls__ - cls_param = cast(Type[T], cls_param_) - else: - cls_param = cls - - return cls_param(tensor=value) - - @staticmethod - def get_comp_backend() -> 'TensorFlowCompBackend': - """Return the computational backend of the tensor""" - from docarray.computation.tensorflow_backend import TensorFlowCompBackend - - return TensorFlowCompBackend() - - def _docarray_to_json_compatible(self) -> np.ndarray: - """ - Convert `TensorFlowTensor` into a json compatible object - :return: a representation of the tensor compatible with orjson - """ - return self.unwrap().numpy() - - def to_protobuf(self) -> 'NdArrayProto': - """ - Transform self into an NdArrayProto protobuf message. 
- """ - from docarray.proto import NdArrayProto - - nd_proto = NdArrayProto() - - value_np = self.tensor.numpy() - nd_proto.dense.buffer = value_np.tobytes() - nd_proto.dense.ClearField('shape') - nd_proto.dense.shape.extend(list(value_np.shape)) - nd_proto.dense.dtype = value_np.dtype.str - - return nd_proto - - @classmethod - def from_protobuf(cls: Type[T], pb_msg: 'NdArrayProto') -> 'T': - """ - Read ndarray from a proto msg. - :param pb_msg: - :return: a `TensorFlowTensor` - """ - source = pb_msg.dense - if source.buffer: - x = np.frombuffer(bytearray(source.buffer), dtype=source.dtype) - return cls.from_ndarray(x.reshape(source.shape)) - elif len(source.shape) > 0: - return cls.from_ndarray(np.zeros(source.shape)) - else: - raise ValueError( - f'Proto message {pb_msg} cannot be cast to a TensorFlowTensor.' - ) - - @classmethod - def from_ndarray(cls: Type[T], value: np.ndarray) -> T: - """Create a `TensorFlowTensor` from a numpy array. - - :param value: the numpy array - :return: a `TensorFlowTensor` - """ - return cls._docarray_from_native(tf.convert_to_tensor(value)) - - def unwrap(self) -> tf.Tensor: - """ - Return the original `tf.Tensor` without any memory copy. - - The original view rest intact and is still a Document `TensorFlowTensor` - but the return object is a pure `tf.Tensor` but both object share - the same memory layout. - - --- - - ```python - from docarray.typing import TensorFlowTensor - import tensorflow as tf - - t1 = TensorFlowTensor.validate(tf.zeros((3, 224, 224)), None, None) - # here t1 is a docarray TensorFlowTensor - t2 = t1.unwrap() - # here t2 is a pure tf.Tensor but t1 is still a Docarray TensorFlowTensor - ``` - - --- - :return: a `tf.Tensor` - """ - return self.tensor - - def __len__(self) -> int: - return len(self.tensor) diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/catalog.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/catalog.py deleted file mode 100644 index 4f5209b5583d01258437bdc9b52a3dd716bdbbf6..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/data/catalog.py +++ /dev/null @@ -1,236 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import copy -import logging -import types -from collections import UserDict -from typing import List - -from annotator.oneformer.detectron2.utils.logger import log_first_n - -__all__ = ["DatasetCatalog", "MetadataCatalog", "Metadata"] - - -class _DatasetCatalog(UserDict): - """ - A global dictionary that stores information about the datasets and how to obtain them. - - It contains a mapping from strings - (which are names that identify a dataset, e.g. "coco_2014_train") - to a function which parses the dataset and returns the samples in the - format of `list[dict]`. - - The returned dicts should be in Detectron2 Dataset format (See DATASETS.md for details) - if used with the data loader functionalities in `data/build.py,data/detection_transform.py`. - - The purpose of having this catalog is to make it easy to choose - different datasets, by just using the strings in the config. - """ - - def register(self, name, func): - """ - Args: - name (str): the name that identifies a dataset, e.g. "coco_2014_train". - func (callable): a callable which takes no arguments and returns a list of dicts. - It must return the same results if called multiple times. - """ - assert callable(func), "You must register a function with `DatasetCatalog.register`!" 
- assert name not in self, "Dataset '{}' is already registered!".format(name) - self[name] = func - - def get(self, name): - """ - Call the registered function and return its results. - - Args: - name (str): the name that identifies a dataset, e.g. "coco_2014_train". - - Returns: - list[dict]: dataset annotations. - """ - try: - f = self[name] - except KeyError as e: - raise KeyError( - "Dataset '{}' is not registered! Available datasets are: {}".format( - name, ", ".join(list(self.keys())) - ) - ) from e - return f() - - def list(self) -> List[str]: - """ - List all registered datasets. - - Returns: - list[str] - """ - return list(self.keys()) - - def remove(self, name): - """ - Alias of ``pop``. - """ - self.pop(name) - - def __str__(self): - return "DatasetCatalog(registered datasets: {})".format(", ".join(self.keys())) - - __repr__ = __str__ - - -DatasetCatalog = _DatasetCatalog() -DatasetCatalog.__doc__ = ( - _DatasetCatalog.__doc__ - + """ - .. automethod:: detectron2.data.catalog.DatasetCatalog.register - .. automethod:: detectron2.data.catalog.DatasetCatalog.get -""" -) - - -class Metadata(types.SimpleNamespace): - """ - A class that supports simple attribute setter/getter. - It is intended for storing metadata of a dataset and make it accessible globally. - - Examples: - :: - # somewhere when you load the data: - MetadataCatalog.get("mydataset").thing_classes = ["person", "dog"] - - # somewhere when you print statistics or visualize: - classes = MetadataCatalog.get("mydataset").thing_classes - """ - - # the name of the dataset - # set default to N/A so that `self.name` in the errors will not trigger getattr again - name: str = "N/A" - - _RENAMED = { - "class_names": "thing_classes", - "dataset_id_to_contiguous_id": "thing_dataset_id_to_contiguous_id", - "stuff_class_names": "stuff_classes", - } - - def __getattr__(self, key): - if key in self._RENAMED: - log_first_n( - logging.WARNING, - "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]), - n=10, - ) - return getattr(self, self._RENAMED[key]) - - # "name" exists in every metadata - if len(self.__dict__) > 1: - raise AttributeError( - "Attribute '{}' does not exist in the metadata of dataset '{}'. Available " - "keys are {}.".format(key, self.name, str(self.__dict__.keys())) - ) - else: - raise AttributeError( - f"Attribute '{key}' does not exist in the metadata of dataset '{self.name}': " - "metadata is empty." - ) - - def __setattr__(self, key, val): - if key in self._RENAMED: - log_first_n( - logging.WARNING, - "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]), - n=10, - ) - setattr(self, self._RENAMED[key], val) - - # Ensure that metadata of the same name stays consistent - try: - oldval = getattr(self, key) - assert oldval == val, ( - "Attribute '{}' in the metadata of '{}' cannot be set " - "to a different value!\n{} != {}".format(key, self.name, oldval, val) - ) - except AttributeError: - super().__setattr__(key, val) - - def as_dict(self): - """ - Returns all the metadata as a dict. - Note that modifications to the returned dict will not reflect on the Metadata object. - """ - return copy.copy(self.__dict__) - - def set(self, **kwargs): - """ - Set multiple metadata with kwargs. - """ - for k, v in kwargs.items(): - setattr(self, k, v) - return self - - def get(self, key, default=None): - """ - Access an attribute and return its value if exists. - Otherwise return default. 
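A quick registration round-trip for the catalog classes above looks roughly like the sketch below; the dataset name, records, and classes are invented, and it assumes the vendored package re-exports `DatasetCatalog` and `MetadataCatalog` from its `data` package the same way upstream detectron2 does.

```python
from annotator.oneformer.detectron2.data import DatasetCatalog, MetadataCatalog

def load_my_dataset():
    # Must return the same list[dict] (Detectron2 dataset format) on every call.
    return [
        {"file_name": "img_0001.jpg", "height": 480, "width": 640,
         "image_id": 0, "annotations": []},
    ]

DatasetCatalog.register("my_dataset_train", load_my_dataset)
MetadataCatalog.get("my_dataset_train").thing_classes = ["person", "dog"]

records = DatasetCatalog.get("my_dataset_train")   # invokes load_my_dataset()
classes = MetadataCatalog.get("my_dataset_train").thing_classes
```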
- """ - try: - return getattr(self, key) - except AttributeError: - return default - - -class _MetadataCatalog(UserDict): - """ - MetadataCatalog is a global dictionary that provides access to - :class:`Metadata` of a given dataset. - - The metadata associated with a certain name is a singleton: once created, the - metadata will stay alive and will be returned by future calls to ``get(name)``. - - It's like global variables, so don't abuse it. - It's meant for storing knowledge that's constant and shared across the execution - of the program, e.g.: the class names in COCO. - """ - - def get(self, name): - """ - Args: - name (str): name of a dataset (e.g. coco_2014_train). - - Returns: - Metadata: The :class:`Metadata` instance associated with this name, - or create an empty one if none is available. - """ - assert len(name) - r = super().get(name, None) - if r is None: - r = self[name] = Metadata(name=name) - return r - - def list(self): - """ - List all registered metadata. - - Returns: - list[str]: keys (names of datasets) of all registered metadata - """ - return list(self.keys()) - - def remove(self, name): - """ - Alias of ``pop``. - """ - self.pop(name) - - def __str__(self): - return "MetadataCatalog(registered metadata: {})".format(", ".join(self.keys())) - - __repr__ = __str__ - - -MetadataCatalog = _MetadataCatalog() -MetadataCatalog.__doc__ = ( - _MetadataCatalog.__doc__ - + """ - .. automethod:: detectron2.data.catalog.MetadataCatalog.get -""" -) diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/engine/defaults.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/engine/defaults.py deleted file mode 100644 index 51d49148ca7b048402a63490bf7df83a43c65d9f..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/engine/defaults.py +++ /dev/null @@ -1,715 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -""" -This file contains components with some default boilerplate logic user may need -in training / testing. They will not work for everyone, but many users may find them useful. - -The behavior of functions/classes in this file is subject to change, -since they are meant to represent the "common default behavior" people need in their projects. 
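This module is the usual entry point for training scripts; a condensed sketch of the conventional `train_net.py` wiring follows, assuming the vendored `annotator.oneformer.detectron2.engine` re-exports these helpers like upstream detectron2 and that a valid config file is passed via `--config-file`.

```python
from annotator.oneformer.detectron2.config import get_cfg
from annotator.oneformer.detectron2.engine import (
    DefaultTrainer,
    default_argument_parser,
    default_setup,
)


def main(args):
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)   # path supplied via --config-file
    if args.opts:
        cfg.merge_from_list(args.opts)      # extra "PATH.KEY VALUE" overrides
    cfg.freeze()
    default_setup(cfg, args)                # logging, config backup, RNG seeding

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()


if __name__ == "__main__":
    main(default_argument_parser().parse_args())
```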
-""" - -import argparse -import logging -import os -import sys -import weakref -from collections import OrderedDict -from typing import Optional -import torch -from fvcore.nn.precise_bn import get_bn_modules -from omegaconf import OmegaConf -from torch.nn.parallel import DistributedDataParallel - -import annotator.oneformer.detectron2.data.transforms as T -from annotator.oneformer.detectron2.checkpoint import DetectionCheckpointer -from annotator.oneformer.detectron2.config import CfgNode, LazyConfig -from annotator.oneformer.detectron2.data import ( - MetadataCatalog, - build_detection_test_loader, - build_detection_train_loader, -) -from annotator.oneformer.detectron2.evaluation import ( - DatasetEvaluator, - inference_on_dataset, - print_csv_format, - verify_results, -) -from annotator.oneformer.detectron2.modeling import build_model -from annotator.oneformer.detectron2.solver import build_lr_scheduler, build_optimizer -from annotator.oneformer.detectron2.utils import comm -from annotator.oneformer.detectron2.utils.collect_env import collect_env_info -from annotator.oneformer.detectron2.utils.env import seed_all_rng -from annotator.oneformer.detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter -from annotator.oneformer.detectron2.utils.file_io import PathManager -from annotator.oneformer.detectron2.utils.logger import setup_logger - -from . import hooks -from .train_loop import AMPTrainer, SimpleTrainer, TrainerBase - -__all__ = [ - "create_ddp_model", - "default_argument_parser", - "default_setup", - "default_writers", - "DefaultPredictor", - "DefaultTrainer", -] - - -def create_ddp_model(model, *, fp16_compression=False, **kwargs): - """ - Create a DistributedDataParallel model if there are >1 processes. - - Args: - model: a torch.nn.Module - fp16_compression: add fp16 compression hooks to the ddp object. - See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook - kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`. - """ # noqa - if comm.get_world_size() == 1: - return model - if "device_ids" not in kwargs: - kwargs["device_ids"] = [comm.get_local_rank()] - ddp = DistributedDataParallel(model, **kwargs) - if fp16_compression: - from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks - - ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook) - return ddp - - -def default_argument_parser(epilog=None): - """ - Create a parser with some common arguments used by detectron2 users. - - Args: - epilog (str): epilog passed to ArgumentParser describing the usage. - - Returns: - argparse.ArgumentParser: - """ - parser = argparse.ArgumentParser( - epilog=epilog - or f""" -Examples: - -Run on single machine: - $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml - -Change some config options: - $ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001 - -Run on multiple machines: - (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url [--other-flags] - (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url [--other-flags] -""", - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") - parser.add_argument( - "--resume", - action="store_true", - help="Whether to attempt to resume from the checkpoint directory. 
" - "See documentation of `DefaultTrainer.resume_or_load()` for what it means.", - ) - parser.add_argument("--eval-only", action="store_true", help="perform evaluation only") - parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*") - parser.add_argument("--num-machines", type=int, default=1, help="total number of machines") - parser.add_argument( - "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)" - ) - - # PyTorch still may leave orphan processes in multi-gpu training. - # Therefore we use a deterministic way to obtain port, - # so that users are aware of orphan processes by seeing the port occupied. - port = 2**15 + 2**14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2**14 - parser.add_argument( - "--dist-url", - default="tcp://127.0.0.1:{}".format(port), - help="initialization URL for pytorch distributed backend. See " - "https://pytorch.org/docs/stable/distributed.html for details.", - ) - parser.add_argument( - "opts", - help=""" -Modify config options at the end of the command. For Yacs configs, use -space-separated "PATH.KEY VALUE" pairs. -For python-based LazyConfig, use "path.key=value". - """.strip(), - default=None, - nargs=argparse.REMAINDER, - ) - return parser - - -def _try_get_key(cfg, *keys, default=None): - """ - Try select keys from cfg until the first key that exists. Otherwise return default. - """ - if isinstance(cfg, CfgNode): - cfg = OmegaConf.create(cfg.dump()) - for k in keys: - none = object() - p = OmegaConf.select(cfg, k, default=none) - if p is not none: - return p - return default - - -def _highlight(code, filename): - try: - import pygments - except ImportError: - return code - - from pygments.lexers import Python3Lexer, YamlLexer - from pygments.formatters import Terminal256Formatter - - lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer() - code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai")) - return code - - -def default_setup(cfg, args): - """ - Perform some basic common setups at the beginning of a job, including: - - 1. Set up the detectron2 logger - 2. Log basic information about environment, cmdline arguments, and config - 3. Backup the config to the output directory - - Args: - cfg (CfgNode or omegaconf.DictConfig): the full config to be used - args (argparse.NameSpace): the command line arguments to be logged - """ - output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir") - if comm.is_main_process() and output_dir: - PathManager.mkdirs(output_dir) - - rank = comm.get_rank() - setup_logger(output_dir, distributed_rank=rank, name="fvcore") - logger = setup_logger(output_dir, distributed_rank=rank) - - logger.info("Rank of current process: {}. 
World size: {}".format(rank, comm.get_world_size())) - logger.info("Environment info:\n" + collect_env_info()) - - logger.info("Command line arguments: " + str(args)) - if hasattr(args, "config_file") and args.config_file != "": - logger.info( - "Contents of args.config_file={}:\n{}".format( - args.config_file, - _highlight(PathManager.open(args.config_file, "r").read(), args.config_file), - ) - ) - - if comm.is_main_process() and output_dir: - # Note: some of our scripts may expect the existence of - # config.yaml in output directory - path = os.path.join(output_dir, "config.yaml") - if isinstance(cfg, CfgNode): - logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml"))) - with PathManager.open(path, "w") as f: - f.write(cfg.dump()) - else: - LazyConfig.save(cfg, path) - logger.info("Full config saved to {}".format(path)) - - # make sure each worker has a different, yet deterministic seed if specified - seed = _try_get_key(cfg, "SEED", "train.seed", default=-1) - seed_all_rng(None if seed < 0 else seed + rank) - - # cudnn benchmark has large overhead. It shouldn't be used considering the small size of - # typical validation set. - if not (hasattr(args, "eval_only") and args.eval_only): - torch.backends.cudnn.benchmark = _try_get_key( - cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False - ) - - -def default_writers(output_dir: str, max_iter: Optional[int] = None): - """ - Build a list of :class:`EventWriter` to be used. - It now consists of a :class:`CommonMetricPrinter`, - :class:`TensorboardXWriter` and :class:`JSONWriter`. - - Args: - output_dir: directory to store JSON metrics and tensorboard events - max_iter: the total number of iterations - - Returns: - list[EventWriter]: a list of :class:`EventWriter` objects. - """ - PathManager.mkdirs(output_dir) - return [ - # It may not always print what you want to see, since it prints "common" metrics only. - CommonMetricPrinter(max_iter), - JSONWriter(os.path.join(output_dir, "metrics.json")), - TensorboardXWriter(output_dir), - ] - - -class DefaultPredictor: - """ - Create a simple end-to-end predictor with the given config that runs on - single device for a single input image. - - Compared to using the model directly, this class does the following additions: - - 1. Load checkpoint from `cfg.MODEL.WEIGHTS`. - 2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`. - 3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`. - 4. Take one input image and produce a single output, instead of a batch. - - This is meant for simple demo purposes, so it does the above steps automatically. - This is not meant for benchmarks or running complicated inference logic. - If you'd like to do anything more complicated, please refer to its source code as - examples to build and use the model manually. - - Attributes: - metadata (Metadata): the metadata of the underlying dataset, obtained from - cfg.DATASETS.TEST. 
- - Examples: - :: - pred = DefaultPredictor(cfg) - inputs = cv2.imread("input.jpg") - outputs = pred(inputs) - """ - - def __init__(self, cfg): - self.cfg = cfg.clone() # cfg can be modified by model - self.model = build_model(self.cfg) - self.model.eval() - if len(cfg.DATASETS.TEST): - self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0]) - - checkpointer = DetectionCheckpointer(self.model) - checkpointer.load(cfg.MODEL.WEIGHTS) - - self.aug = T.ResizeShortestEdge( - [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST - ) - - self.input_format = cfg.INPUT.FORMAT - assert self.input_format in ["RGB", "BGR"], self.input_format - - def __call__(self, original_image): - """ - Args: - original_image (np.ndarray): an image of shape (H, W, C) (in BGR order). - - Returns: - predictions (dict): - the output of the model for one image only. - See :doc:`/tutorials/models` for details about the format. - """ - with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258 - # Apply pre-processing to image. - if self.input_format == "RGB": - # whether the model expects BGR inputs or RGB - original_image = original_image[:, :, ::-1] - height, width = original_image.shape[:2] - image = self.aug.get_transform(original_image).apply_image(original_image) - image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1)) - - inputs = {"image": image, "height": height, "width": width} - predictions = self.model([inputs])[0] - return predictions - - -class DefaultTrainer(TrainerBase): - """ - A trainer with default training logic. It does the following: - - 1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader - defined by the given config. Create a LR scheduler defined by the config. - 2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when - `resume_or_load` is called. - 3. Register a few common hooks defined by the config. - - It is created to simplify the **standard model training workflow** and reduce code boilerplate - for users who only need the standard training workflow, with standard features. - It means this class makes *many assumptions* about your training logic that - may easily become invalid in a new research. In fact, any assumptions beyond those made in the - :class:`SimpleTrainer` are too much for research. - - The code of this class has been annotated about restrictive assumptions it makes. - When they do not work for you, you're encouraged to: - - 1. Overwrite methods of this class, OR: - 2. Use :class:`SimpleTrainer`, which only does minimal SGD training and - nothing else. You can then add your own hooks if needed. OR: - 3. Write your own training loop similar to `tools/plain_train_net.py`. - - See the :doc:`/tutorials/training` tutorials for more details. - - Note that the behavior of this class, like other functions/classes in - this file, is not stable, since it is meant to represent the "common default behavior". - It is only guaranteed to work well with the standard models and training workflow in detectron2. - To obtain more stable behavior, write your own training logic with other public APIs. 
- - Examples: - :: - trainer = DefaultTrainer(cfg) - trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS - trainer.train() - - Attributes: - scheduler: - checkpointer (DetectionCheckpointer): - cfg (CfgNode): - """ - - def __init__(self, cfg): - """ - Args: - cfg (CfgNode): - """ - super().__init__() - logger = logging.getLogger("detectron2") - if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2 - setup_logger() - cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size()) - - # Assume these objects must be constructed in this order. - model = self.build_model(cfg) - optimizer = self.build_optimizer(cfg, model) - data_loader = self.build_train_loader(cfg) - - model = create_ddp_model(model, broadcast_buffers=False) - self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)( - model, data_loader, optimizer - ) - - self.scheduler = self.build_lr_scheduler(cfg, optimizer) - self.checkpointer = DetectionCheckpointer( - # Assume you want to save checkpoints together with logs/statistics - model, - cfg.OUTPUT_DIR, - trainer=weakref.proxy(self), - ) - self.start_iter = 0 - self.max_iter = cfg.SOLVER.MAX_ITER - self.cfg = cfg - - self.register_hooks(self.build_hooks()) - - def resume_or_load(self, resume=True): - """ - If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by - a `last_checkpoint` file), resume from the file. Resuming means loading all - available states (eg. optimizer and scheduler) and update iteration counter - from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used. - - Otherwise, this is considered as an independent training. The method will load model - weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start - from iteration 0. - - Args: - resume (bool): whether to do resume or not - """ - self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume) - if resume and self.checkpointer.has_checkpoint(): - # The checkpoint stores the training iteration that just finished, thus we start - # at the next iteration - self.start_iter = self.iter + 1 - - def build_hooks(self): - """ - Build a list of default hooks, including timing, evaluation, - checkpointing, lr scheduling, precise BN, writing events. - - Returns: - list[HookBase]: - """ - cfg = self.cfg.clone() - cfg.defrost() - cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN - - ret = [ - hooks.IterationTimer(), - hooks.LRScheduler(), - hooks.PreciseBN( - # Run at the same freq as (but before) evaluation. - cfg.TEST.EVAL_PERIOD, - self.model, - # Build a new data loader to not affect training - self.build_train_loader(cfg), - cfg.TEST.PRECISE_BN.NUM_ITER, - ) - if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) - else None, - ] - - # Do PreciseBN before checkpointer, because it updates the model and need to - # be saved by checkpointer. - # This is not always the best: if checkpointing has a different frequency, - # some checkpoints may have more precise statistics than others. - if comm.is_main_process(): - ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD)) - - def test_and_save_results(): - self._last_eval_results = self.test(self.cfg, self.model) - return self._last_eval_results - - # Do evaluation after checkpointer, because then if it fails, - # we can use the saved checkpoint to debug. 
- ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) - - if comm.is_main_process(): - # Here the default print/log frequency of each writer is used. - # run writers in the end, so that evaluation metrics are written - ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) - return ret - - def build_writers(self): - """ - Build a list of writers to be used using :func:`default_writers()`. - If you'd like a different list of writers, you can overwrite it in - your trainer. - - Returns: - list[EventWriter]: a list of :class:`EventWriter` objects. - """ - return default_writers(self.cfg.OUTPUT_DIR, self.max_iter) - - def train(self): - """ - Run training. - - Returns: - OrderedDict of results, if evaluation is enabled. Otherwise None. - """ - super().train(self.start_iter, self.max_iter) - if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process(): - assert hasattr( - self, "_last_eval_results" - ), "No evaluation results obtained during training!" - verify_results(self.cfg, self._last_eval_results) - return self._last_eval_results - - def run_step(self): - self._trainer.iter = self.iter - self._trainer.run_step() - - def state_dict(self): - ret = super().state_dict() - ret["_trainer"] = self._trainer.state_dict() - return ret - - def load_state_dict(self, state_dict): - super().load_state_dict(state_dict) - self._trainer.load_state_dict(state_dict["_trainer"]) - - @classmethod - def build_model(cls, cfg): - """ - Returns: - torch.nn.Module: - - It now calls :func:`detectron2.modeling.build_model`. - Overwrite it if you'd like a different model. - """ - model = build_model(cfg) - logger = logging.getLogger(__name__) - logger.info("Model:\n{}".format(model)) - return model - - @classmethod - def build_optimizer(cls, cfg, model): - """ - Returns: - torch.optim.Optimizer: - - It now calls :func:`detectron2.solver.build_optimizer`. - Overwrite it if you'd like a different optimizer. - """ - return build_optimizer(cfg, model) - - @classmethod - def build_lr_scheduler(cls, cfg, optimizer): - """ - It now calls :func:`detectron2.solver.build_lr_scheduler`. - Overwrite it if you'd like a different scheduler. - """ - return build_lr_scheduler(cfg, optimizer) - - @classmethod - def build_train_loader(cls, cfg): - """ - Returns: - iterable - - It now calls :func:`detectron2.data.build_detection_train_loader`. - Overwrite it if you'd like a different data loader. - """ - return build_detection_train_loader(cfg) - - @classmethod - def build_test_loader(cls, cfg, dataset_name): - """ - Returns: - iterable - - It now calls :func:`detectron2.data.build_detection_test_loader`. - Overwrite it if you'd like a different data loader. - """ - return build_detection_test_loader(cfg, dataset_name) - - @classmethod - def build_evaluator(cls, cfg, dataset_name): - """ - Returns: - DatasetEvaluator or None - - It is not implemented by default. - """ - raise NotImplementedError( - """ -If you want DefaultTrainer to automatically run evaluation, -please implement `build_evaluator()` in subclasses (see train_net.py for example). -Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example). -""" - ) - - @classmethod - def test(cls, cfg, model, evaluators=None): - """ - Evaluate the given model. The given model is expected to already contain - weights to evaluate. - - Args: - cfg (CfgNode): - model (nn.Module): - evaluators (list[DatasetEvaluator] or None): if None, will call - :meth:`build_evaluator`. 
Otherwise, must have the same length as - ``cfg.DATASETS.TEST``. - - Returns: - dict: a dict of result metrics - """ - logger = logging.getLogger(__name__) - if isinstance(evaluators, DatasetEvaluator): - evaluators = [evaluators] - if evaluators is not None: - assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( - len(cfg.DATASETS.TEST), len(evaluators) - ) - - results = OrderedDict() - for idx, dataset_name in enumerate(cfg.DATASETS.TEST): - data_loader = cls.build_test_loader(cfg, dataset_name) - # When evaluators are passed in as arguments, - # implicitly assume that evaluators can be created before data_loader. - if evaluators is not None: - evaluator = evaluators[idx] - else: - try: - evaluator = cls.build_evaluator(cfg, dataset_name) - except NotImplementedError: - logger.warn( - "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " - "or implement its `build_evaluator` method." - ) - results[dataset_name] = {} - continue - results_i = inference_on_dataset(model, data_loader, evaluator) - results[dataset_name] = results_i - if comm.is_main_process(): - assert isinstance( - results_i, dict - ), "Evaluator must return a dict on the main process. Got {} instead.".format( - results_i - ) - logger.info("Evaluation results for {} in csv format:".format(dataset_name)) - print_csv_format(results_i) - - if len(results) == 1: - results = list(results.values())[0] - return results - - @staticmethod - def auto_scale_workers(cfg, num_workers: int): - """ - When the config is defined for certain number of workers (according to - ``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of - workers currently in use, returns a new cfg where the total batch size - is scaled so that the per-GPU batch size stays the same as the - original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``. - - Other config options are also scaled accordingly: - * training steps and warmup steps are scaled inverse proportionally. - * learning rate are scaled proportionally, following :paper:`ImageNet in 1h`. - - For example, with the original config like the following: - - .. code-block:: yaml - - IMS_PER_BATCH: 16 - BASE_LR: 0.1 - REFERENCE_WORLD_SIZE: 8 - MAX_ITER: 5000 - STEPS: (4000,) - CHECKPOINT_PERIOD: 1000 - - When this config is used on 16 GPUs instead of the reference number 8, - calling this method will return a new config with: - - .. code-block:: yaml - - IMS_PER_BATCH: 32 - BASE_LR: 0.2 - REFERENCE_WORLD_SIZE: 16 - MAX_ITER: 2500 - STEPS: (2000,) - CHECKPOINT_PERIOD: 500 - - Note that both the original config and this new config can be trained on 16 GPUs. - It's up to user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``). - - Returns: - CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``. - """ - old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE - if old_world_size == 0 or old_world_size == num_workers: - return cfg - cfg = cfg.clone() - frozen = cfg.is_frozen() - cfg.defrost() - - assert ( - cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0 - ), "Invalid REFERENCE_WORLD_SIZE in config!" 
- scale = num_workers / old_world_size - bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale)) - lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale - max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale)) - warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale)) - cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS) - cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale)) - cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale)) - cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant - logger = logging.getLogger(__name__) - logger.info( - f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, " - f"max_iter={max_iter}, warmup={warmup_iter}." - ) - - if frozen: - cfg.freeze() - return cfg - - -# Access basic attributes from the underlying trainer -for _attr in ["model", "data_loader", "optimizer"]: - setattr( - DefaultTrainer, - _attr, - property( - # getter - lambda self, x=_attr: getattr(self._trainer, x), - # setter - lambda self, value, x=_attr: setattr(self._trainer, x, value), - ), - ) diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/modeling/meta_arch/panoptic_fpn.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/modeling/meta_arch/panoptic_fpn.py deleted file mode 100644 index 1ca5f19a0ce0099a49aad8bb6b659355c4f6e200..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/modeling/meta_arch/panoptic_fpn.py +++ /dev/null @@ -1,269 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. - -import logging -from typing import Dict, List -import torch -from torch import nn - -from annotator.oneformer.detectron2.config import configurable -from annotator.oneformer.detectron2.structures import ImageList - -from ..postprocessing import detector_postprocess, sem_seg_postprocess -from .build import META_ARCH_REGISTRY -from .rcnn import GeneralizedRCNN -from .semantic_seg import build_sem_seg_head - -__all__ = ["PanopticFPN"] - - -@META_ARCH_REGISTRY.register() -class PanopticFPN(GeneralizedRCNN): - """ - Implement the paper :paper:`PanopticFPN`. - """ - - @configurable - def __init__( - self, - *, - sem_seg_head: nn.Module, - combine_overlap_thresh: float = 0.5, - combine_stuff_area_thresh: float = 4096, - combine_instances_score_thresh: float = 0.5, - **kwargs, - ): - """ - NOTE: this interface is experimental. - - Args: - sem_seg_head: a module for the semantic segmentation head. - combine_overlap_thresh: combine masks into one instances if - they have enough overlap - combine_stuff_area_thresh: ignore stuff areas smaller than this threshold - combine_instances_score_thresh: ignore instances whose score is - smaller than this threshold - - Other arguments are the same as :class:`GeneralizedRCNN`. 
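This meta-architecture is what a panoptic-segmentation config ultimately instantiates; a hedged end-to-end sketch using the `DefaultPredictor` from `defaults.py` above is shown below. The config and weight paths are placeholders rather than files shipped by this repo, and it assumes the vendored `config`/`engine` packages expose `get_cfg()` and `DefaultPredictor` as upstream detectron2 does.

```python
import cv2
from annotator.oneformer.detectron2.config import get_cfg
from annotator.oneformer.detectron2.engine.defaults import DefaultPredictor

cfg = get_cfg()
cfg.merge_from_file("path/to/panoptic_fpn_config.yaml")   # placeholder config
cfg.MODEL.WEIGHTS = "path/to/panoptic_fpn_weights.pkl"    # placeholder weights

predictor = DefaultPredictor(cfg)
outputs = predictor(cv2.imread("input.jpg"))              # BGR image in, dict out

# Per the forward()/inference() docs in this class, "panoptic_seg" holds the
# segment id map together with the per-segment info list.
panoptic_seg, segments_info = outputs["panoptic_seg"]
```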
- """ - super().__init__(**kwargs) - self.sem_seg_head = sem_seg_head - # options when combining instance & semantic outputs - self.combine_overlap_thresh = combine_overlap_thresh - self.combine_stuff_area_thresh = combine_stuff_area_thresh - self.combine_instances_score_thresh = combine_instances_score_thresh - - @classmethod - def from_config(cls, cfg): - ret = super().from_config(cfg) - ret.update( - { - "combine_overlap_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH, - "combine_stuff_area_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT, - "combine_instances_score_thresh": cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH, # noqa - } - ) - ret["sem_seg_head"] = build_sem_seg_head(cfg, ret["backbone"].output_shape()) - logger = logging.getLogger(__name__) - if not cfg.MODEL.PANOPTIC_FPN.COMBINE.ENABLED: - logger.warning( - "PANOPTIC_FPN.COMBINED.ENABLED is no longer used. " - " model.inference(do_postprocess=) should be used to toggle postprocessing." - ) - if cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT != 1.0: - w = cfg.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT - logger.warning( - "PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT should be replaced by weights on each ROI head." - ) - - def update_weight(x): - if isinstance(x, dict): - return {k: v * w for k, v in x.items()} - else: - return x * w - - roi_heads = ret["roi_heads"] - roi_heads.box_predictor.loss_weight = update_weight(roi_heads.box_predictor.loss_weight) - roi_heads.mask_head.loss_weight = update_weight(roi_heads.mask_head.loss_weight) - return ret - - def forward(self, batched_inputs): - """ - Args: - batched_inputs: a list, batched outputs of :class:`DatasetMapper`. - Each item in the list contains the inputs for one image. - - For now, each item in the list is a dict that contains: - - * "image": Tensor, image in (C, H, W) format. - * "instances": Instances - * "sem_seg": semantic segmentation ground truth. - * Other information that's included in the original dicts, such as: - "height", "width" (int): the output resolution of the model, used in inference. - See :meth:`postprocess` for details. - - Returns: - list[dict]: - each dict has the results for one image. The dict contains the following keys: - - * "instances": see :meth:`GeneralizedRCNN.forward` for its format. - * "sem_seg": see :meth:`SemanticSegmentor.forward` for its format. - * "panoptic_seg": See the return value of - :func:`combine_semantic_and_instance_outputs` for its format. - """ - if not self.training: - return self.inference(batched_inputs) - images = self.preprocess_image(batched_inputs) - features = self.backbone(images.tensor) - - assert "sem_seg" in batched_inputs[0] - gt_sem_seg = [x["sem_seg"].to(self.device) for x in batched_inputs] - gt_sem_seg = ImageList.from_tensors( - gt_sem_seg, - self.backbone.size_divisibility, - self.sem_seg_head.ignore_value, - self.backbone.padding_constraints, - ).tensor - sem_seg_results, sem_seg_losses = self.sem_seg_head(features, gt_sem_seg) - - gt_instances = [x["instances"].to(self.device) for x in batched_inputs] - proposals, proposal_losses = self.proposal_generator(images, features, gt_instances) - detector_results, detector_losses = self.roi_heads( - images, features, proposals, gt_instances - ) - - losses = sem_seg_losses - losses.update(proposal_losses) - losses.update(detector_losses) - return losses - - def inference(self, batched_inputs: List[Dict[str, torch.Tensor]], do_postprocess: bool = True): - """ - Run inference on the given inputs. 
- - Args: - batched_inputs (list[dict]): same as in :meth:`forward` - do_postprocess (bool): whether to apply post-processing on the outputs. - - Returns: - When do_postprocess=True, see docs in :meth:`forward`. - Otherwise, returns a (list[Instances], list[Tensor]) that contains - the raw detector outputs, and raw semantic segmentation outputs. - """ - images = self.preprocess_image(batched_inputs) - features = self.backbone(images.tensor) - sem_seg_results, sem_seg_losses = self.sem_seg_head(features, None) - proposals, _ = self.proposal_generator(images, features, None) - detector_results, _ = self.roi_heads(images, features, proposals, None) - - if do_postprocess: - processed_results = [] - for sem_seg_result, detector_result, input_per_image, image_size in zip( - sem_seg_results, detector_results, batched_inputs, images.image_sizes - ): - height = input_per_image.get("height", image_size[0]) - width = input_per_image.get("width", image_size[1]) - sem_seg_r = sem_seg_postprocess(sem_seg_result, image_size, height, width) - detector_r = detector_postprocess(detector_result, height, width) - - processed_results.append({"sem_seg": sem_seg_r, "instances": detector_r}) - - panoptic_r = combine_semantic_and_instance_outputs( - detector_r, - sem_seg_r.argmax(dim=0), - self.combine_overlap_thresh, - self.combine_stuff_area_thresh, - self.combine_instances_score_thresh, - ) - processed_results[-1]["panoptic_seg"] = panoptic_r - return processed_results - else: - return detector_results, sem_seg_results - - -def combine_semantic_and_instance_outputs( - instance_results, - semantic_results, - overlap_threshold, - stuff_area_thresh, - instances_score_thresh, -): - """ - Implement a simple combining logic following - "combine_semantic_and_instance_predictions.py" in panopticapi - to produce panoptic segmentation outputs. - - Args: - instance_results: output of :func:`detector_postprocess`. - semantic_results: an (H, W) tensor, each element is the contiguous semantic - category id - - Returns: - panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. - segments_info (list[dict]): Describe each segment in `panoptic_seg`. - Each dict contains keys "id", "category_id", "isthing". 
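A tiny worked example of the overlap test used by this function may help; the numbers are invented.

```python
mask_area = 100        # pixels covered by the candidate instance mask
intersect_area = 60    # of those, pixels already claimed by higher-scoring instances
overlap_threshold = 0.5

# The instance is kept only if the already-claimed fraction does not exceed the
# threshold; here 60 / 100 = 0.6 > 0.5, so this instance is skipped.
keep = intersect_area / mask_area <= overlap_threshold
print(keep)   # False
```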
- """ - panoptic_seg = torch.zeros_like(semantic_results, dtype=torch.int32) - - # sort instance outputs by scores - sorted_inds = torch.argsort(-instance_results.scores) - - current_segment_id = 0 - segments_info = [] - - instance_masks = instance_results.pred_masks.to(dtype=torch.bool, device=panoptic_seg.device) - - # Add instances one-by-one, check for overlaps with existing ones - for inst_id in sorted_inds: - score = instance_results.scores[inst_id].item() - if score < instances_score_thresh: - break - mask = instance_masks[inst_id] # H,W - mask_area = mask.sum().item() - - if mask_area == 0: - continue - - intersect = (mask > 0) & (panoptic_seg > 0) - intersect_area = intersect.sum().item() - - if intersect_area * 1.0 / mask_area > overlap_threshold: - continue - - if intersect_area > 0: - mask = mask & (panoptic_seg == 0) - - current_segment_id += 1 - panoptic_seg[mask] = current_segment_id - segments_info.append( - { - "id": current_segment_id, - "isthing": True, - "score": score, - "category_id": instance_results.pred_classes[inst_id].item(), - "instance_id": inst_id.item(), - } - ) - - # Add semantic results to remaining empty areas - semantic_labels = torch.unique(semantic_results).cpu().tolist() - for semantic_label in semantic_labels: - if semantic_label == 0: # 0 is a special "thing" class - continue - mask = (semantic_results == semantic_label) & (panoptic_seg == 0) - mask_area = mask.sum().item() - if mask_area < stuff_area_thresh: - continue - - current_segment_id += 1 - panoptic_seg[mask] = current_segment_id - segments_info.append( - { - "id": current_segment_id, - "isthing": False, - "category_id": semantic_label, - "area": mask_area, - } - ) - - return panoptic_seg, segments_info diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/datasets/voc.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/datasets/voc.py deleted file mode 100644 index a8855203b14ee0dc4da9099a2945d4aedcffbcd6..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/datasets/voc.py +++ /dev/null @@ -1,29 +0,0 @@ -import os.path as osp - -from .builder import DATASETS -from .custom import CustomDataset - - -@DATASETS.register_module() -class PascalVOCDataset(CustomDataset): - """Pascal VOC dataset. - - Args: - split (str): Split txt file for Pascal VOC. 
- """ - - CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', - 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', - 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', - 'train', 'tvmonitor') - - PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], - [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], - [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], - [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], - [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]] - - def __init__(self, split, **kwargs): - super(PascalVOCDataset, self).__init__( - img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs) - assert osp.exists(self.img_dir) and self.split is not None diff --git a/spaces/Superlang/remove_background/app.py b/spaces/Superlang/remove_background/app.py deleted file mode 100644 index 829136f03f1bdb98c971317089c89924e83aa406..0000000000000000000000000000000000000000 --- a/spaces/Superlang/remove_background/app.py +++ /dev/null @@ -1,36 +0,0 @@ -""" - reference: https://github.com/xuebinqin/DIS -""" - -import os - -import gdown -import gradio as gr - -from DIS.IsNetPipeLine import IsNetPipeLine - -save_model_path = "DIS/save_models" -model_name = os.path.join(save_model_path, "isnet.pth") -# Download official weights -if not os.path.exists(model_name): - if not os.path.exists(save_model_path): - os.mkdir(save_model_path) - MODEL_PATH_URL = "https://huggingface.co/Superlang/ImageProcess/resolve/main/isnet.pth" - gdown.download(MODEL_PATH_URL, model_name, use_cookies=False) - -pipe = IsNetPipeLine(model_path=model_name) - - -def inference(image): - return pipe(image) - - -title = "remove background" -interface = gr.Interface( - fn=inference, - inputs=gr.Image(type='pil'), - outputs=["image", "image"], - title=title, - allow_flagging='never', - cache_examples=True, -).queue(concurrency_count=1, api_open=True).launch(show_api=True, show_error=True) diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/vcs/__init__.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/vcs/__init__.py deleted file mode 100644 index b6beddbe6d24d2949dc89ed07abfebd59d8b63b9..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/vcs/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Expose a limited set of classes and functions so callers outside of -# the vcs package don't need to import deeper than `pip._internal.vcs`. -# (The test directory may still need to import from a vcs sub-package.) -# Import all vcs modules to register each VCS in the VcsSupport object. 
-import pip._internal.vcs.bazaar -import pip._internal.vcs.git -import pip._internal.vcs.mercurial -import pip._internal.vcs.subversion # noqa: F401 -from pip._internal.vcs.versioncontrol import ( # noqa: F401 - RemoteNotFoundError, - RemoteNotValidError, - is_url, - make_vcs_requirement_url, - vcs, -) diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/unistring.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/unistring.py deleted file mode 100644 index 39f6baeedfb8ec129e0076cc3eb94dd5bef92ed0..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/unistring.py +++ /dev/null @@ -1,153 +0,0 @@ -""" - pygments.unistring - ~~~~~~~~~~~~~~~~~~ - - Strings of all Unicode characters of a certain category. - Used for matching in Unicode-aware languages. Run to regenerate. - - Inspired by chartypes_create.py from the MoinMoin project. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -Cc = '\x00-\x1f\x7f-\x9f' - -Cf = '\xad\u0600-\u0605\u061c\u06dd\u070f\u08e2\u180e\u200b-\u200f\u202a-\u202e\u2060-\u2064\u2066-\u206f\ufeff\ufff9-\ufffb\U000110bd\U000110cd\U0001bca0-\U0001bca3\U0001d173-\U0001d17a\U000e0001\U000e0020-\U000e007f' - -Cn = '\u0378-\u0379\u0380-\u0383\u038b\u038d\u03a2\u0530\u0557-\u0558\u058b-\u058c\u0590\u05c8-\u05cf\u05eb-\u05ee\u05f5-\u05ff\u061d\u070e\u074b-\u074c\u07b2-\u07bf\u07fb-\u07fc\u082e-\u082f\u083f\u085c-\u085d\u085f\u086b-\u089f\u08b5\u08be-\u08d2\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09c5-\u09c6\u09c9-\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09ff-\u0a00\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a77-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0acf\u0ad1-\u0adf\u0ae4-\u0ae5\u0af2-\u0af8\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3b\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64-\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0bff\u0c0d\u0c11\u0c29\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5b-\u0c5f\u0c64-\u0c65\u0c70-\u0c77\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4-\u0ce5\u0cf0\u0cf3-\u0cff\u0d04\u0d0d\u0d11\u0d45\u0d49\u0d50-\u0d53\u0d64-\u0d65\u0d80-\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0de5\u0df0-\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce-\u10cf\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135c\u137d-\u137f\u139a-\u139f\u13f6-\u13f7\u13fe-\u13ff\u169d-\u169f\u16f9-\u16ff\u170d\u1715-\u171f\u1737-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de-\u17df\u17ea-\u17ef\u17fa-\u17ff\u180f\u181a-\u181f\u1879-\u187f\u18ab-\u18af\u18f6-\u18ff\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e-\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u
19cf\u19db-\u19dd\u1a1c-\u1a1d\u1a5f\u1a7d-\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae-\u1aaf\u1abf-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c89-\u1c8f\u1cbb-\u1cbc\u1cc8-\u1ccf\u1cfa-\u1cff\u1dfa\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1fff\u2065\u2072-\u2073\u208f\u209d-\u209f\u20c0-\u20cf\u20f1-\u20ff\u218c-\u218f\u2427-\u243f\u244b-\u245f\u2b74-\u2b75\u2b96-\u2b97\u2bc9\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e-\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e4f-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097-\u3098\u3100-\u3104\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32ff\u4db6-\u4dbf\u9ff0-\u9fff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua6f8-\ua6ff\ua7ba-\ua7f6\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c6-\ua8cd\ua8da-\ua8df\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9ff\uaa37-\uaa3f\uaa4e-\uaa4f\uaa5a-\uaa5b\uaac3-\uaada\uaaf7-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f\uab66-\uab6f\uabee-\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\ud7ff\ufa6e-\ufa6f\ufada-\ufaff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-\ufbd2\ufd40-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfe-\ufdff\ufe1a-\ufe1f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd-\ufefe\uff00\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\uffdf\uffe7\uffef-\ufff8\ufffe-\uffff\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U0001018f\U0001019c-\U0001019f\U000101a1-\U000101cf\U000101fe-\U0001027f\U0001029d-\U0001029f\U000102d1-\U000102df\U000102fc-\U000102ff\U00010324-\U0001032c\U0001034b-\U0001034f\U0001037b-\U0001037f\U0001039e\U000103c4-\U000103c7\U000103d6-\U000103ff\U0001049e-\U0001049f\U000104aa-\U000104af\U000104d4-\U000104d7\U000104fc-\U000104ff\U00010528-\U0001052f\U00010564-\U0001056e\U00010570-\U000105ff\U00010737-\U0001073f\U00010756-\U0001075f\U00010768-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010856\U0001089f-\U000108a6\U000108b0-\U000108df\U000108f3\U000108f6-\U000108fa\U0001091c-\U0001091e\U0001093a-\U0001093e\U00010940-\U0001097f\U000109b8-\U000109bb\U000109d0-\U000109d1\U00010a04\U00010a07-\U00010a0b\U00010a14\U00010a18\U00010a36-\U00010a37\U00010a3b-\U00010a3e\U00010a49-\U00010a4f\U00010a59-\U00010a5f\U00010aa0-\U00010abf\U00010ae7-\U00010aea\U00010af7-\U00010aff\U00010b36-\U00010b38\U00010b56-\U00010b57\U00010b73-\U00010b77\U00010b92-\U00010b98\U00010b9d-\U00010ba8\U00010bb0-\U00010bff\U00010c49-\U00010c7f\U00010cb3-\U00010cbf\U00010cf3-\U00010cf9\U00010d28-\U00010d2f\U00010d3a-\U00010e5f\U00010e7f-\U00010eff\U00010f28-\U00010f2f\U00010f5a-\U00010fff\U0001104e-\U00011051\U00011070-\U0001107e\U000110c2-\U000110cc\U000110ce-\U000110cf\U000110e9-\U000110ef\U000110fa-\U000110ff\U00011135\U00011147-\U0001114f\U00011177-\U0001117f\U000111ce-\U000111cf\U000111e0\U000111f5-\U000111ff\U00011212\U0001123f-\U0001127f\U00011287\U00011289\U0001128e\U0001129e\U000112aa-\U000112af\U000112eb-\U000112ef\U000112fa-\U000112ff\U00011304\U0001130d-\U0001130e\U00011311-\U00011312\U00011329\U00011331\U00011334\U0001133a\U00011345-\U00011346\U00011349-\U0001134a\U0001134e-\U0001134f\U00011351-\U00011356\U00011358-\U0001135c\U00011364-\U00011365\U0001136d-\U0001136f\U00011375-\U000113ff\U0001145a\U0001145c\U0001145f-\U0001147f\U000114c8-\U000114
cf\U000114da-\U0001157f\U000115b6-\U000115b7\U000115de-\U000115ff\U00011645-\U0001164f\U0001165a-\U0001165f\U0001166d-\U0001167f\U000116b8-\U000116bf\U000116ca-\U000116ff\U0001171b-\U0001171c\U0001172c-\U0001172f\U00011740-\U000117ff\U0001183c-\U0001189f\U000118f3-\U000118fe\U00011900-\U000119ff\U00011a48-\U00011a4f\U00011a84-\U00011a85\U00011aa3-\U00011abf\U00011af9-\U00011bff\U00011c09\U00011c37\U00011c46-\U00011c4f\U00011c6d-\U00011c6f\U00011c90-\U00011c91\U00011ca8\U00011cb7-\U00011cff\U00011d07\U00011d0a\U00011d37-\U00011d39\U00011d3b\U00011d3e\U00011d48-\U00011d4f\U00011d5a-\U00011d5f\U00011d66\U00011d69\U00011d8f\U00011d92\U00011d99-\U00011d9f\U00011daa-\U00011edf\U00011ef9-\U00011fff\U0001239a-\U000123ff\U0001246f\U00012475-\U0001247f\U00012544-\U00012fff\U0001342f-\U000143ff\U00014647-\U000167ff\U00016a39-\U00016a3f\U00016a5f\U00016a6a-\U00016a6d\U00016a70-\U00016acf\U00016aee-\U00016aef\U00016af6-\U00016aff\U00016b46-\U00016b4f\U00016b5a\U00016b62\U00016b78-\U00016b7c\U00016b90-\U00016e3f\U00016e9b-\U00016eff\U00016f45-\U00016f4f\U00016f7f-\U00016f8e\U00016fa0-\U00016fdf\U00016fe2-\U00016fff\U000187f2-\U000187ff\U00018af3-\U0001afff\U0001b11f-\U0001b16f\U0001b2fc-\U0001bbff\U0001bc6b-\U0001bc6f\U0001bc7d-\U0001bc7f\U0001bc89-\U0001bc8f\U0001bc9a-\U0001bc9b\U0001bca4-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d1e9-\U0001d1ff\U0001d246-\U0001d2df\U0001d2f4-\U0001d2ff\U0001d357-\U0001d35f\U0001d379-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001d7cd\U0001da8c-\U0001da9a\U0001daa0\U0001dab0-\U0001dfff\U0001e007\U0001e019-\U0001e01a\U0001e022\U0001e025\U0001e02b-\U0001e7ff\U0001e8c5-\U0001e8c6\U0001e8d7-\U0001e8ff\U0001e94b-\U0001e94f\U0001e95a-\U0001e95d\U0001e960-\U0001ec70\U0001ecb5-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee73\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0001efff\U0001f02c-\U0001f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0c0\U0001f0d0\U0001f0f6-\U0001f0ff\U0001f10d-\U0001f10f\U0001f16c-\U0001f16f\U0001f1ad-\U0001f1e5\U0001f203-\U0001f20f\U0001f23c-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001f25f\U0001f266-\U0001f2ff\U0001f6d5-\U0001f6df\U0001f6ed-\U0001f6ef\U0001f6fa-\U0001f6ff\U0001f774-\U0001f77f\U0001f7d9-\U0001f7ff\U0001f80c-\U0001f80f\U0001f848-\U0001f84f\U0001f85a-\U0001f85f\U0001f888-\U0001f88f\U0001f8ae-\U0001f8ff\U0001f90c-\U0001f90f\U0001f93f\U0001f971-\U0001f972\U0001f977-\U0001f979\U0001f97b\U0001f9a3-\U0001f9af\U0001f9ba-\U0001f9bf\U0001f9c3-\U0001f9cf\U0001fa00-\U0001fa5f\U0001fa6e-\U0001ffff\U0002a6d7-\U0002a6ff\U0002b735-\U0002b73f\U0002b81e-\U0002b81f\U0002cea2-\U0002ceaf\U0002ebe1-\U0002f7ff\U0002fa1e-\U000e0000\U000e0002-\U000e001f\U000e0080-\U000e00ff\U000e01f0-\U000effff\U000ffffe-\U000fffff\U0010fffe-\U0010ffff' - -Co = '\ue000-\uf8ff\U000f0000-\U000ffffd\U00100000-\U0010fffd' - -Cs = '\ud800-\udbff\\\udc00\udc01-\udfff' - -Ll = 
'a-z\xb5\xdf-\xf6\xf8-\xff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137-\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148-\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e-\u0180\u0183\u0185\u0188\u018c-\u018d\u0192\u0195\u0199-\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa-\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9-\u01ba\u01bd-\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc-\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef-\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233-\u0239\u023c\u023f-\u0240\u0242\u0247\u0249\u024b\u024d\u024f-\u0293\u0295-\u02af\u0371\u0373\u0377\u037b-\u037d\u0390\u03ac-\u03ce\u03d0-\u03d1\u03d5-\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef-\u03f3\u03f5\u03f8\u03fb-\u03fc\u0430-\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce-\u04cf\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u04fb\u04fd\u04ff\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0511\u0513\u0515\u0517\u0519\u051b\u051d\u051f\u0521\u0523\u0525\u0527\u0529\u052b\u052d\u052f\u0560-\u0588\u10d0-\u10fa\u10fd-\u10ff\u13f8-\u13fd\u1c80-\u1c88\u1d00-\u1d2b\u1d6b-\u1d77\u1d79-\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95-\u1e9d\u1e9f\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1efb\u1efd\u1eff-\u1f07\u1f10-\u1f15\u1f20-\u1f27\u1f30-\u1f37\u1f40-\u1f45\u1f50-\u1f57\u1f60-\u1f67\u1f70-\u1f7d\u1f80-\u1f87\u1f90-\u1f97\u1fa0-\u1fa7\u1fb0-\u1fb4\u1fb6-\u1fb7\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fc7\u1fd0-\u1fd3\u1fd6-\u1fd7\u1fe0-\u1fe7\u1ff2-\u1ff4\u1ff6-\u1ff7\u210a\u210e-\u210f\u2113\u212f\u2134\u2139\u213c-\u213d\u2146-\u2149\u214e\u2184\u2c30-\u2c5e\u2c61\u2c65-\u2c66\u2c68\u2c6a\u2c6c\u2c71\u2c73-\u2c74\u2c76-\u2c7b\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3-\u2ce4\u2cec\u2cee\u2cf3\u2d00-\u2d25\u2d27\u2d2d\ua641\ua643\ua645\ua647\ua649\ua64b\ua64d\ua64f\ua651\ua653\ua655\ua657\ua659\ua65b\ua65d\ua65f\ua661\ua663\ua665\ua667\ua669\ua66b\ua66d\ua681\ua683\ua685\
ua687\ua689\ua68b\ua68d\ua68f\ua691\ua693\ua695\ua697\ua699\ua69b\ua723\ua725\ua727\ua729\ua72b\ua72d\ua72f-\ua731\ua733\ua735\ua737\ua739\ua73b\ua73d\ua73f\ua741\ua743\ua745\ua747\ua749\ua74b\ua74d\ua74f\ua751\ua753\ua755\ua757\ua759\ua75b\ua75d\ua75f\ua761\ua763\ua765\ua767\ua769\ua76b\ua76d\ua76f\ua771-\ua778\ua77a\ua77c\ua77f\ua781\ua783\ua785\ua787\ua78c\ua78e\ua791\ua793-\ua795\ua797\ua799\ua79b\ua79d\ua79f\ua7a1\ua7a3\ua7a5\ua7a7\ua7a9\ua7af\ua7b5\ua7b7\ua7b9\ua7fa\uab30-\uab5a\uab60-\uab65\uab70-\uabbf\ufb00-\ufb06\ufb13-\ufb17\uff41-\uff5a\U00010428-\U0001044f\U000104d8-\U000104fb\U00010cc0-\U00010cf2\U000118c0-\U000118df\U00016e60-\U00016e7f\U0001d41a-\U0001d433\U0001d44e-\U0001d454\U0001d456-\U0001d467\U0001d482-\U0001d49b\U0001d4b6-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d4cf\U0001d4ea-\U0001d503\U0001d51e-\U0001d537\U0001d552-\U0001d56b\U0001d586-\U0001d59f\U0001d5ba-\U0001d5d3\U0001d5ee-\U0001d607\U0001d622-\U0001d63b\U0001d656-\U0001d66f\U0001d68a-\U0001d6a5\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6e1\U0001d6fc-\U0001d714\U0001d716-\U0001d71b\U0001d736-\U0001d74e\U0001d750-\U0001d755\U0001d770-\U0001d788\U0001d78a-\U0001d78f\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7c9\U0001d7cb\U0001e922-\U0001e943' - -Lm = '\u02b0-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0374\u037a\u0559\u0640\u06e5-\u06e6\u07f4-\u07f5\u07fa\u081a\u0824\u0828\u0971\u0e46\u0ec6\u10fc\u17d7\u1843\u1aa7\u1c78-\u1c7d\u1d2c-\u1d6a\u1d78\u1d9b-\u1dbf\u2071\u207f\u2090-\u209c\u2c7c-\u2c7d\u2d6f\u2e2f\u3005\u3031-\u3035\u303b\u309d-\u309e\u30fc-\u30fe\ua015\ua4f8-\ua4fd\ua60c\ua67f\ua69c-\ua69d\ua717-\ua71f\ua770\ua788\ua7f8-\ua7f9\ua9cf\ua9e6\uaa70\uaadd\uaaf3-\uaaf4\uab5c-\uab5f\uff70\uff9e-\uff9f\U00016b40-\U00016b43\U00016f93-\U00016f9f\U00016fe0-\U00016fe1' - -Lo = 
'\xaa\xba\u01bb\u01c0-\u01c3\u0294\u05d0-\u05ea\u05ef-\u05f2\u0620-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u0800-\u0815\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0972-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2-\u0eb3\u0ebd\u0ec0-\u0ec4\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u1100-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16f1-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17dc\u1820-\u1842\u1844-\u1878\u1880-\u1884\u1887-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u2135-\u2138\u2d30-\u2d67\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3006\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua014\ua016-\ua48c\ua4d0-\ua4f7\ua500-\ua60b\ua610-\ua61f\ua62a-\ua62b\ua66e\ua6a0-\ua6e5\ua78f\ua7f7\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9e0-\ua9e4\ua9e7-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa6f\uaa71-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadc\uaae0-\uaaea\uaaf2\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff66-\uff6f\uff71-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080
-\U000100fa\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U00010340\U00010342-\U00010349\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U00010450-\U0001049d\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016f00-\U00016f44\U00016f50\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001e800-\U0001e8c4\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d' - -Lt = '\u01c5\u01c8\u01cb\u01f2\u1f88-\u1f8f\u1f98-\u1f9f\u1fa8-\u1faf\u1fbc\u1fcc\u1ffc' - -Lu = 
'A-Z\xc0-\xd6\xd8-\xde\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u037f\u0386\u0388-\u038a\u038c\u038e-\u038f\u0391-\u03a1\u03a3-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0524\u0526\u0528\u052a\u052c\u052e\u0531-\u0556\u10a0-\u10c5\u10c7\u10cd\u13a0-\u13f5\u1c90-\u1cba\u1cbd-\u1cbf\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59\u1f5b\u1f5d\u1f5f\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67\u2c69\u2c6b\u2c6d-\u2c70\u2c72\u2c75\u2c7e-\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\u2ceb\u2ced\u2cf2\ua640\ua642\ua644\ua646\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a\ua65c\ua65e\ua660\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696\ua698\ua69a\ua722\ua724\u
a726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\ua78d\ua790\ua792\ua796\ua798\ua79a\ua79c\ua79e\ua7a0\ua7a2\ua7a4\ua7a6\ua7a8\ua7aa-\ua7ae\ua7b0-\ua7b4\ua7b6\ua7b8\uff21-\uff3a\U00010400-\U00010427\U000104b0-\U000104d3\U00010c80-\U00010cb2\U000118a0-\U000118bf\U00016e40-\U00016e5f\U0001d400-\U0001d419\U0001d434-\U0001d44d\U0001d468-\U0001d481\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b5\U0001d4d0-\U0001d4e9\U0001d504-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d538-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d56c-\U0001d585\U0001d5a0-\U0001d5b9\U0001d5d4-\U0001d5ed\U0001d608-\U0001d621\U0001d63c-\U0001d655\U0001d670-\U0001d689\U0001d6a8-\U0001d6c0\U0001d6e2-\U0001d6fa\U0001d71c-\U0001d734\U0001d756-\U0001d76e\U0001d790-\U0001d7a8\U0001d7ca\U0001e900-\U0001e921' - -Mc = '\u0903\u093b\u093e-\u0940\u0949-\u094c\u094e-\u094f\u0982-\u0983\u09be-\u09c0\u09c7-\u09c8\u09cb-\u09cc\u09d7\u0a03\u0a3e-\u0a40\u0a83\u0abe-\u0ac0\u0ac9\u0acb-\u0acc\u0b02-\u0b03\u0b3e\u0b40\u0b47-\u0b48\u0b4b-\u0b4c\u0b57\u0bbe-\u0bbf\u0bc1-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcc\u0bd7\u0c01-\u0c03\u0c41-\u0c44\u0c82-\u0c83\u0cbe\u0cc0-\u0cc4\u0cc7-\u0cc8\u0cca-\u0ccb\u0cd5-\u0cd6\u0d02-\u0d03\u0d3e-\u0d40\u0d46-\u0d48\u0d4a-\u0d4c\u0d57\u0d82-\u0d83\u0dcf-\u0dd1\u0dd8-\u0ddf\u0df2-\u0df3\u0f3e-\u0f3f\u0f7f\u102b-\u102c\u1031\u1038\u103b-\u103c\u1056-\u1057\u1062-\u1064\u1067-\u106d\u1083-\u1084\u1087-\u108c\u108f\u109a-\u109c\u17b6\u17be-\u17c5\u17c7-\u17c8\u1923-\u1926\u1929-\u192b\u1930-\u1931\u1933-\u1938\u1a19-\u1a1a\u1a55\u1a57\u1a61\u1a63-\u1a64\u1a6d-\u1a72\u1b04\u1b35\u1b3b\u1b3d-\u1b41\u1b43-\u1b44\u1b82\u1ba1\u1ba6-\u1ba7\u1baa\u1be7\u1bea-\u1bec\u1bee\u1bf2-\u1bf3\u1c24-\u1c2b\u1c34-\u1c35\u1ce1\u1cf2-\u1cf3\u1cf7\u302e-\u302f\ua823-\ua824\ua827\ua880-\ua881\ua8b4-\ua8c3\ua952-\ua953\ua983\ua9b4-\ua9b5\ua9ba-\ua9bb\ua9bd-\ua9c0\uaa2f-\uaa30\uaa33-\uaa34\uaa4d\uaa7b\uaa7d\uaaeb\uaaee-\uaaef\uaaf5\uabe3-\uabe4\uabe6-\uabe7\uabe9-\uabea\uabec\U00011000\U00011002\U00011082\U000110b0-\U000110b2\U000110b7-\U000110b8\U0001112c\U00011145-\U00011146\U00011182\U000111b3-\U000111b5\U000111bf-\U000111c0\U0001122c-\U0001122e\U00011232-\U00011233\U00011235\U000112e0-\U000112e2\U00011302-\U00011303\U0001133e-\U0001133f\U00011341-\U00011344\U00011347-\U00011348\U0001134b-\U0001134d\U00011357\U00011362-\U00011363\U00011435-\U00011437\U00011440-\U00011441\U00011445\U000114b0-\U000114b2\U000114b9\U000114bb-\U000114be\U000114c1\U000115af-\U000115b1\U000115b8-\U000115bb\U000115be\U00011630-\U00011632\U0001163b-\U0001163c\U0001163e\U000116ac\U000116ae-\U000116af\U000116b6\U00011720-\U00011721\U00011726\U0001182c-\U0001182e\U00011838\U00011a39\U00011a57-\U00011a58\U00011a97\U00011c2f\U00011c3e\U00011ca9\U00011cb1\U00011cb4\U00011d8a-\U00011d8e\U00011d93-\U00011d94\U00011d96\U00011ef5-\U00011ef6\U00016f51-\U00016f7e\U0001d165-\U0001d166\U0001d16d-\U0001d172' - -Me = '\u0488-\u0489\u1abe\u20dd-\u20e0\u20e2-\u20e4\ua670-\ua672' - -Mn = 
'\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u0610-\u061a\u064b-\u065f\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7-\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3\u07fd\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08d3-\u08e1\u08e3-\u0902\u093a\u093c\u0941-\u0948\u094d\u0951-\u0957\u0962-\u0963\u0981\u09bc\u09c1-\u09c4\u09cd\u09e2-\u09e3\u09fe\u0a01-\u0a02\u0a3c\u0a41-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a70-\u0a71\u0a75\u0a81-\u0a82\u0abc\u0ac1-\u0ac5\u0ac7-\u0ac8\u0acd\u0ae2-\u0ae3\u0afa-\u0aff\u0b01\u0b3c\u0b3f\u0b41-\u0b44\u0b4d\u0b56\u0b62-\u0b63\u0b82\u0bc0\u0bcd\u0c00\u0c04\u0c3e-\u0c40\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c62-\u0c63\u0c81\u0cbc\u0cbf\u0cc6\u0ccc-\u0ccd\u0ce2-\u0ce3\u0d00-\u0d01\u0d3b-\u0d3c\u0d41-\u0d44\u0d4d\u0d62-\u0d63\u0dca\u0dd2-\u0dd4\u0dd6\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0eb1\u0eb4-\u0eb9\u0ebb-\u0ebc\u0ec8-\u0ecd\u0f18-\u0f19\u0f35\u0f37\u0f39\u0f71-\u0f7e\u0f80-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102d-\u1030\u1032-\u1037\u1039-\u103a\u103d-\u103e\u1058-\u1059\u105e-\u1060\u1071-\u1074\u1082\u1085-\u1086\u108d\u109d\u135d-\u135f\u1712-\u1714\u1732-\u1734\u1752-\u1753\u1772-\u1773\u17b4-\u17b5\u17b7-\u17bd\u17c6\u17c9-\u17d3\u17dd\u180b-\u180d\u1885-\u1886\u18a9\u1920-\u1922\u1927-\u1928\u1932\u1939-\u193b\u1a17-\u1a18\u1a1b\u1a56\u1a58-\u1a5e\u1a60\u1a62\u1a65-\u1a6c\u1a73-\u1a7c\u1a7f\u1ab0-\u1abd\u1b00-\u1b03\u1b34\u1b36-\u1b3a\u1b3c\u1b42\u1b6b-\u1b73\u1b80-\u1b81\u1ba2-\u1ba5\u1ba8-\u1ba9\u1bab-\u1bad\u1be6\u1be8-\u1be9\u1bed\u1bef-\u1bf1\u1c2c-\u1c33\u1c36-\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1cf8-\u1cf9\u1dc0-\u1df9\u1dfb-\u1dff\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302d\u3099-\u309a\ua66f\ua674-\ua67d\ua69e-\ua69f\ua6f0-\ua6f1\ua802\ua806\ua80b\ua825-\ua826\ua8c4-\ua8c5\ua8e0-\ua8f1\ua8ff\ua926-\ua92d\ua947-\ua951\ua980-\ua982\ua9b3\ua9b6-\ua9b9\ua9bc\ua9e5\uaa29-\uaa2e\uaa31-\uaa32\uaa35-\uaa36\uaa43\uaa4c\uaa7c\uaab0\uaab2-\uaab4\uaab7-\uaab8\uaabe-\uaabf\uaac1\uaaec-\uaaed\uaaf6\uabe5\uabe8\uabed\ufb1e\ufe00-\ufe0f\ufe20-\ufe2f\U000101fd\U000102e0\U00010376-\U0001037a\U00010a01-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a0f\U00010a38-\U00010a3a\U00010a3f\U00010ae5-\U00010ae6\U00010d24-\U00010d27\U00010f46-\U00010f50\U00011001\U00011038-\U00011046\U0001107f-\U00011081\U000110b3-\U000110b6\U000110b9-\U000110ba\U00011100-\U00011102\U00011127-\U0001112b\U0001112d-\U00011134\U00011173\U00011180-\U00011181\U000111b6-\U000111be\U000111c9-\U000111cc\U0001122f-\U00011231\U00011234\U00011236-\U00011237\U0001123e\U000112df\U000112e3-\U000112ea\U00011300-\U00011301\U0001133b-\U0001133c\U00011340\U00011366-\U0001136c\U00011370-\U00011374\U00011438-\U0001143f\U00011442-\U00011444\U00011446\U0001145e\U000114b3-\U000114b8\U000114ba\U000114bf-\U000114c0\U000114c2-\U000114c3\U000115b2-\U000115b5\U000115bc-\U000115bd\U000115bf-\U000115c0\U000115dc-\U000115dd\U00011633-\U0001163a\U0001163d\U0001163f-\U00011640\U000116ab\U000116ad\U000116b0-\U000116b5\U000116b7\U0001171d-\U0001171f\U00011722-\U00011725\U00011727-\U0001172b\U0001182f-\U00011837\U00011839-\U0001183a\U00011a01-\U00011a0a\U00011a33-\U00011a38\U00011a3b-\U00011a3e\U00011a47\U00011a51-\U00011a56\U00011a59-\U00011a5b\U00011a8a-\U00011a96\U00011a98-\U00011a99\U00011c30-\U00011c36\U00011c38-\U00011c3d\U00011c3f\U00011c92-\U00011ca7\U00011caa-\U00011cb0\U00011cb2-\U00011cb3\U00011cb5-\U00011cb6\U00011d31-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U000
11d45\U00011d47\U00011d90-\U00011d91\U00011d95\U00011d97\U00011ef3-\U00011ef4\U00016af0-\U00016af4\U00016b30-\U00016b36\U00016f8f-\U00016f92\U0001bc9d-\U0001bc9e\U0001d167-\U0001d169\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e8d0-\U0001e8d6\U0001e944-\U0001e94a\U000e0100-\U000e01ef' - -Nd = '0-9\u0660-\u0669\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f\u09e6-\u09ef\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0be6-\u0bef\u0c66-\u0c6f\u0ce6-\u0cef\u0d66-\u0d6f\u0de6-\u0def\u0e50-\u0e59\u0ed0-\u0ed9\u0f20-\u0f29\u1040-\u1049\u1090-\u1099\u17e0-\u17e9\u1810-\u1819\u1946-\u194f\u19d0-\u19d9\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\ua620-\ua629\ua8d0-\ua8d9\ua900-\ua909\ua9d0-\ua9d9\ua9f0-\ua9f9\uaa50-\uaa59\uabf0-\uabf9\uff10-\uff19\U000104a0-\U000104a9\U00010d30-\U00010d39\U00011066-\U0001106f\U000110f0-\U000110f9\U00011136-\U0001113f\U000111d0-\U000111d9\U000112f0-\U000112f9\U00011450-\U00011459\U000114d0-\U000114d9\U00011650-\U00011659\U000116c0-\U000116c9\U00011730-\U00011739\U000118e0-\U000118e9\U00011c50-\U00011c59\U00011d50-\U00011d59\U00011da0-\U00011da9\U00016a60-\U00016a69\U00016b50-\U00016b59\U0001d7ce-\U0001d7ff\U0001e950-\U0001e959' - -Nl = '\u16ee-\u16f0\u2160-\u2182\u2185-\u2188\u3007\u3021-\u3029\u3038-\u303a\ua6e6-\ua6ef\U00010140-\U00010174\U00010341\U0001034a\U000103d1-\U000103d5\U00012400-\U0001246e' - -No = '\xb2-\xb3\xb9\xbc-\xbe\u09f4-\u09f9\u0b72-\u0b77\u0bf0-\u0bf2\u0c78-\u0c7e\u0d58-\u0d5e\u0d70-\u0d78\u0f2a-\u0f33\u1369-\u137c\u17f0-\u17f9\u19da\u2070\u2074-\u2079\u2080-\u2089\u2150-\u215f\u2189\u2460-\u249b\u24ea-\u24ff\u2776-\u2793\u2cfd\u3192-\u3195\u3220-\u3229\u3248-\u324f\u3251-\u325f\u3280-\u3289\u32b1-\u32bf\ua830-\ua835\U00010107-\U00010133\U00010175-\U00010178\U0001018a-\U0001018b\U000102e1-\U000102fb\U00010320-\U00010323\U00010858-\U0001085f\U00010879-\U0001087f\U000108a7-\U000108af\U000108fb-\U000108ff\U00010916-\U0001091b\U000109bc-\U000109bd\U000109c0-\U000109cf\U000109d2-\U000109ff\U00010a40-\U00010a48\U00010a7d-\U00010a7e\U00010a9d-\U00010a9f\U00010aeb-\U00010aef\U00010b58-\U00010b5f\U00010b78-\U00010b7f\U00010ba9-\U00010baf\U00010cfa-\U00010cff\U00010e60-\U00010e7e\U00010f1d-\U00010f26\U00010f51-\U00010f54\U00011052-\U00011065\U000111e1-\U000111f4\U0001173a-\U0001173b\U000118ea-\U000118f2\U00011c5a-\U00011c6c\U00016b5b-\U00016b61\U00016e80-\U00016e96\U0001d2e0-\U0001d2f3\U0001d360-\U0001d378\U0001e8c7-\U0001e8cf\U0001ec71-\U0001ecab\U0001ecad-\U0001ecaf\U0001ecb1-\U0001ecb4\U0001f100-\U0001f10c' - -Pc = '_\u203f-\u2040\u2054\ufe33-\ufe34\ufe4d-\ufe4f\uff3f' - -Pd = '\\-\u058a\u05be\u1400\u1806\u2010-\u2015\u2e17\u2e1a\u2e3a-\u2e3b\u2e40\u301c\u3030\u30a0\ufe31-\ufe32\ufe58\ufe63\uff0d' - -Pe = ')\\]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u2309\u230b\u232a\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e-\u301f\ufd3e\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63' - -Pf = '\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d\u2e21' - -Pi = '\xab\u2018\u201b-\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c\u2e20' - -Po = 
"!-#%-'*,.-/:-;?-@\\\\\xa1\xa7\xb6-\xb7\xbf\u037e\u0387\u055a-\u055f\u0589\u05c0\u05c3\u05c6\u05f3-\u05f4\u0609-\u060a\u060c-\u060d\u061b\u061e-\u061f\u066a-\u066d\u06d4\u0700-\u070d\u07f7-\u07f9\u0830-\u083e\u085e\u0964-\u0965\u0970\u09fd\u0a76\u0af0\u0c84\u0df4\u0e4f\u0e5a-\u0e5b\u0f04-\u0f12\u0f14\u0f85\u0fd0-\u0fd4\u0fd9-\u0fda\u104a-\u104f\u10fb\u1360-\u1368\u166d-\u166e\u16eb-\u16ed\u1735-\u1736\u17d4-\u17d6\u17d8-\u17da\u1800-\u1805\u1807-\u180a\u1944-\u1945\u1a1e-\u1a1f\u1aa0-\u1aa6\u1aa8-\u1aad\u1b5a-\u1b60\u1bfc-\u1bff\u1c3b-\u1c3f\u1c7e-\u1c7f\u1cc0-\u1cc7\u1cd3\u2016-\u2017\u2020-\u2027\u2030-\u2038\u203b-\u203e\u2041-\u2043\u2047-\u2051\u2053\u2055-\u205e\u2cf9-\u2cfc\u2cfe-\u2cff\u2d70\u2e00-\u2e01\u2e06-\u2e08\u2e0b\u2e0e-\u2e16\u2e18-\u2e19\u2e1b\u2e1e-\u2e1f\u2e2a-\u2e2e\u2e30-\u2e39\u2e3c-\u2e3f\u2e41\u2e43-\u2e4e\u3001-\u3003\u303d\u30fb\ua4fe-\ua4ff\ua60d-\ua60f\ua673\ua67e\ua6f2-\ua6f7\ua874-\ua877\ua8ce-\ua8cf\ua8f8-\ua8fa\ua8fc\ua92e-\ua92f\ua95f\ua9c1-\ua9cd\ua9de-\ua9df\uaa5c-\uaa5f\uaade-\uaadf\uaaf0-\uaaf1\uabeb\ufe10-\ufe16\ufe19\ufe30\ufe45-\ufe46\ufe49-\ufe4c\ufe50-\ufe52\ufe54-\ufe57\ufe5f-\ufe61\ufe68\ufe6a-\ufe6b\uff01-\uff03\uff05-\uff07\uff0a\uff0c\uff0e-\uff0f\uff1a-\uff1b\uff1f-\uff20\uff3c\uff61\uff64-\uff65\U00010100-\U00010102\U0001039f\U000103d0\U0001056f\U00010857\U0001091f\U0001093f\U00010a50-\U00010a58\U00010a7f\U00010af0-\U00010af6\U00010b39-\U00010b3f\U00010b99-\U00010b9c\U00010f55-\U00010f59\U00011047-\U0001104d\U000110bb-\U000110bc\U000110be-\U000110c1\U00011140-\U00011143\U00011174-\U00011175\U000111c5-\U000111c8\U000111cd\U000111db\U000111dd-\U000111df\U00011238-\U0001123d\U000112a9\U0001144b-\U0001144f\U0001145b\U0001145d\U000114c6\U000115c1-\U000115d7\U00011641-\U00011643\U00011660-\U0001166c\U0001173c-\U0001173e\U0001183b\U00011a3f-\U00011a46\U00011a9a-\U00011a9c\U00011a9e-\U00011aa2\U00011c41-\U00011c45\U00011c70-\U00011c71\U00011ef7-\U00011ef8\U00012470-\U00012474\U00016a6e-\U00016a6f\U00016af5\U00016b37-\U00016b3b\U00016b44\U00016e97-\U00016e9a\U0001bc9f\U0001da87-\U0001da8b\U0001e95e-\U0001e95f" - -Ps = '(\\[{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2308\u230a\u2329\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26\u2e28\u2e42\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3f\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62' - -Sc = '$\xa2-\xa5\u058f\u060b\u07fe-\u07ff\u09f2-\u09f3\u09fb\u0af1\u0bf9\u0e3f\u17db\u20a0-\u20bf\ua838\ufdfc\ufe69\uff04\uffe0-\uffe1\uffe5-\uffe6\U0001ecb0' - -Sk = '\\^`\xa8\xaf\xb4\xb8\u02c2-\u02c5\u02d2-\u02df\u02e5-\u02eb\u02ed\u02ef-\u02ff\u0375\u0384-\u0385\u1fbd\u1fbf-\u1fc1\u1fcd-\u1fcf\u1fdd-\u1fdf\u1fed-\u1fef\u1ffd-\u1ffe\u309b-\u309c\ua700-\ua716\ua720-\ua721\ua789-\ua78a\uab5b\ufbb2-\ufbc1\uff3e\uff40\uffe3\U0001f3fb-\U0001f3ff' - -Sm = 
'+<->|~\xac\xb1\xd7\xf7\u03f6\u0606-\u0608\u2044\u2052\u207a-\u207c\u208a-\u208c\u2118\u2140-\u2144\u214b\u2190-\u2194\u219a-\u219b\u21a0\u21a3\u21a6\u21ae\u21ce-\u21cf\u21d2\u21d4\u21f4-\u22ff\u2320-\u2321\u237c\u239b-\u23b3\u23dc-\u23e1\u25b7\u25c1\u25f8-\u25ff\u266f\u27c0-\u27c4\u27c7-\u27e5\u27f0-\u27ff\u2900-\u2982\u2999-\u29d7\u29dc-\u29fb\u29fe-\u2aff\u2b30-\u2b44\u2b47-\u2b4c\ufb29\ufe62\ufe64-\ufe66\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe9-\uffec\U0001d6c1\U0001d6db\U0001d6fb\U0001d715\U0001d735\U0001d74f\U0001d76f\U0001d789\U0001d7a9\U0001d7c3\U0001eef0-\U0001eef1' - -So = '\xa6\xa9\xae\xb0\u0482\u058d-\u058e\u060e-\u060f\u06de\u06e9\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0d4f\u0d79\u0f01-\u0f03\u0f13\u0f15-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38\u0fbe-\u0fc5\u0fc7-\u0fcc\u0fce-\u0fcf\u0fd5-\u0fd8\u109e-\u109f\u1390-\u1399\u1940\u19de-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2117\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u214a\u214c-\u214d\u214f\u218a-\u218b\u2195-\u2199\u219c-\u219f\u21a1-\u21a2\u21a4-\u21a5\u21a7-\u21ad\u21af-\u21cd\u21d0-\u21d1\u21d3\u21d5-\u21f3\u2300-\u2307\u230c-\u231f\u2322-\u2328\u232b-\u237b\u237d-\u239a\u23b4-\u23db\u23e2-\u2426\u2440-\u244a\u249c-\u24e9\u2500-\u25b6\u25b8-\u25c0\u25c2-\u25f7\u2600-\u266e\u2670-\u2767\u2794-\u27bf\u2800-\u28ff\u2b00-\u2b2f\u2b45-\u2b46\u2b4d-\u2b73\u2b76-\u2b95\u2b98-\u2bc8\u2bca-\u2bfe\u2ce5-\u2cea\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3004\u3012-\u3013\u3020\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3\u3200-\u321e\u322a-\u3247\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u32fe\u3300-\u33ff\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ua836-\ua837\ua839\uaa77-\uaa79\ufdfd\uffe4\uffe8\uffed-\uffee\ufffc-\ufffd\U00010137-\U0001013f\U00010179-\U00010189\U0001018c-\U0001018e\U00010190-\U0001019b\U000101a0\U000101d0-\U000101fc\U00010877-\U00010878\U00010ac8\U0001173f\U00016b3c-\U00016b3f\U00016b45\U0001bc9c\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U0001d129-\U0001d164\U0001d16a-\U0001d16c\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U0001d1e8\U0001d200-\U0001d241\U0001d245\U0001d300-\U0001d356\U0001d800-\U0001d9ff\U0001da37-\U0001da3a\U0001da6d-\U0001da74\U0001da76-\U0001da83\U0001da85-\U0001da86\U0001ecac\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0bf\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f0f5\U0001f110-\U0001f16b\U0001f170-\U0001f1ac\U0001f1e6-\U0001f202\U0001f210-\U0001f23b\U0001f240-\U0001f248\U0001f250-\U0001f251\U0001f260-\U0001f265\U0001f300-\U0001f3fa\U0001f400-\U0001f6d4\U0001f6e0-\U0001f6ec\U0001f6f0-\U0001f6f9\U0001f700-\U0001f773\U0001f780-\U0001f7d8\U0001f800-\U0001f80b\U0001f810-\U0001f847\U0001f850-\U0001f859\U0001f860-\U0001f887\U0001f890-\U0001f8ad\U0001f900-\U0001f90b\U0001f910-\U0001f93e\U0001f940-\U0001f970\U0001f973-\U0001f976\U0001f97a\U0001f97c-\U0001f9a2\U0001f9b0-\U0001f9b9\U0001f9c0-\U0001f9c2\U0001f9d0-\U0001f9ff\U0001fa60-\U0001fa6d' - -Zl = '\u2028' - -Zp = '\u2029' - -Zs = ' \xa0\u1680\u2000-\u200a\u202f\u205f\u3000' - -xid_continue = 
'0-9A-Z_a-z\xaa\xb5\xb7\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0300-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u0483-\u0487\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u05d0-\u05ea\u05ef-\u05f2\u0610-\u061a\u0620-\u0669\u066e-\u06d3\u06d5-\u06dc\u06df-\u06e8\u06ea-\u06fc\u06ff\u0710-\u074a\u074d-\u07b1\u07c0-\u07f5\u07fa\u07fd\u0800-\u082d\u0840-\u085b\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u08d3-\u08e1\u08e3-\u0963\u0966-\u096f\u0971-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09f1\u09fc\u09fe\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0aef\u0af9-\u0aff\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b6f\u0b71\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bef\u0c00-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c5a\u0c60-\u0c63\u0c66-\u0c6f\u0c80-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2\u0d00-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d54-\u0d57\u0d5f-\u0d63\u0d66-\u0d6f\u0d7a-\u0d7f\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0de6-\u0def\u0df2-\u0df3\u0e01-\u0e3a\u0e40-\u0e4e\u0e50-\u0e59\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00\u0f18-\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e-\u0f47\u0f49-\u0f6c\u0f71-\u0f84\u0f86-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1049\u1050-\u109d\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u135f\u1369-\u1371\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772-\u1773\u1780-\u17d3\u17d7\u17dc-\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1820-\u1878\u1880-\u18aa\u18b0-\u18f5\u1900-\u191e\u1920-\u192b\u1930-\u193b\u1946-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u1a00-\u1a1b\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa7\u1ab0-\u1abd\u1b00-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1bf3\u1c00-\u1c37\u1c40-\u1c49\u1c4d-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1cd0-\u1cd2\u1cd4-\u1cf9\u1d00-\u1df9\u1dfb-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0
-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u203f-\u2040\u2054\u2071\u207f\u2090-\u209c\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2dff\u3005-\u3007\u3021-\u302f\u3031-\u3035\u3038-\u303c\u3041-\u3096\u3099-\u309a\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua62b\ua640-\ua66f\ua674-\ua67d\ua67f-\ua6f1\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua827\ua840-\ua873\ua880-\ua8c5\ua8d0-\ua8d9\ua8e0-\ua8f7\ua8fb\ua8fd-\ua92d\ua930-\ua953\ua960-\ua97c\ua980-\ua9c0\ua9cf-\ua9d9\ua9e0-\ua9fe\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa60-\uaa76\uaa7a-\uaac2\uaadb-\uaadd\uaae0-\uaaef\uaaf2-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabea\uabec-\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe00-\ufe0f\ufe20-\ufe2f\ufe33-\ufe34\ufe4d-\ufe4f\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U000101fd\U00010280-\U0001029c\U000102a0-\U000102d0\U000102e0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U0001037a\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104a0-\U000104a9\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a38-\U00010a3a\U00010a3f\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae6\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d27\U00010d30-\U00010d39\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f50\U00011000-\U00011046\U00011066-\U0001106f\U0001107f-\U000110ba\U000110d0-\U000110e8\U000110f0-\U000110f9\U00011100-\U00011134\U00011136-\U0001113f\U00011144-\U00011146\U00011150-\U00011173\U00011176\U00011180-\U000111c4\U000111c9-\U000111cc\U000111d0-\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U00011237\U0001123e\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112ea\U000112f0-\U000112f9\U00011300-\U00011303\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133b-\U00011344\U00011347-\U0
0011348\U0001134b-\U0001134d\U00011350\U00011357\U0001135d-\U00011363\U00011366-\U0001136c\U00011370-\U00011374\U00011400-\U0001144a\U00011450-\U00011459\U0001145e\U00011480-\U000114c5\U000114c7\U000114d0-\U000114d9\U00011580-\U000115b5\U000115b8-\U000115c0\U000115d8-\U000115dd\U00011600-\U00011640\U00011644\U00011650-\U00011659\U00011680-\U000116b7\U000116c0-\U000116c9\U00011700-\U0001171a\U0001171d-\U0001172b\U00011730-\U00011739\U00011800-\U0001183a\U000118a0-\U000118e9\U000118ff\U00011a00-\U00011a3e\U00011a47\U00011a50-\U00011a83\U00011a86-\U00011a99\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c36\U00011c38-\U00011c40\U00011c50-\U00011c59\U00011c72-\U00011c8f\U00011c92-\U00011ca7\U00011ca9-\U00011cb6\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U00011d47\U00011d50-\U00011d59\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d8e\U00011d90-\U00011d91\U00011d93-\U00011d98\U00011da0-\U00011da9\U00011ee0-\U00011ef6\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016a60-\U00016a69\U00016ad0-\U00016aed\U00016af0-\U00016af4\U00016b00-\U00016b36\U00016b40-\U00016b43\U00016b50-\U00016b59\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50-\U00016f7e\U00016f8f-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001bc9d-\U0001bc9e\U0001d165-\U0001d169\U0001d16d-\U0001d172\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001d7ce-\U0001d7ff\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e800-\U0001e8c4\U0001e8d0-\U0001e8d6\U0001e900-\U0001e94a\U0001e950-\U0001e959\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d\U000e0100-\U000e01ef' - -xid_start = 
'A-Z_a-z\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u05d0-\u05ea\u05ef-\u05f2\u0620-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06e5-\u06e6\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4-\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e40-\u0e46\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1878\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a-\ua62b\ua640-\ua66e\ua67f-\ua69d\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua801\ua803-\ua805
\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\ua9e0-\ua9e4\ua9e6-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118a0-\U000118df\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b40-\U00016b43\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50\U00016f93-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b1
1e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001e800-\U0001e8c4\U0001e900-\U0001e943\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d' - -cats = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs'] - -# Generated from unidata 11.0.0 - -def combine(*args): - return ''.join(globals()[cat] for cat in args) - - -def allexcept(*args): - newcats = cats[:] - for arg in args: - newcats.remove(arg) - return ''.join(globals()[cat] for cat in newcats) - - -def _handle_runs(char_list): # pragma: no cover - buf = [] - for c in char_list: - if len(c) == 1: - if buf and buf[-1][1] == chr(ord(c)-1): - buf[-1] = (buf[-1][0], c) - else: - buf.append((c, c)) - else: - buf.append((c, c)) - for a, b in buf: - if a == b: - yield a - else: - yield '%s-%s' % (a, b) - - -if __name__ == '__main__': # pragma: no cover - import unicodedata - - categories = {'xid_start': [], 'xid_continue': []} - - with open(__file__, encoding='utf-8') as fp: - content = fp.read() - - header = content[:content.find('Cc =')] - footer = content[content.find("def combine("):] - - for code in range(0x110000): - c = chr(code) - cat = unicodedata.category(c) - if ord(c) == 0xdc00: - # Hack to avoid combining this combining with the preceding high - # surrogate, 0xdbff, when doing a repr. - c = '\\' + c - elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e): - # Escape regex metachars. - c = '\\' + c - categories.setdefault(cat, []).append(c) - # XID_START and XID_CONTINUE are special categories used for matching - # identifiers in Python 3. 
- if c.isidentifier(): - categories['xid_start'].append(c) - if ('a' + c).isidentifier(): - categories['xid_continue'].append(c) - - with open(__file__, 'w', encoding='utf-8') as fp: - fp.write(header) - - for cat in sorted(categories): - val = ''.join(_handle_runs(categories[cat])) - fp.write('%s = %a\n\n' % (cat, val)) - - cats = sorted(categories) - cats.remove('xid_start') - cats.remove('xid_continue') - fp.write('cats = %r\n\n' % cats) - - fp.write('# Generated from unidata %s\n\n' % (unicodedata.unidata_version,)) - - fp.write(footer) diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/modeling/test_rpn.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/modeling/test_rpn.py deleted file mode 100644 index f14faae56e580d3d4762d31273b9f65c5774346b..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/tests/modeling/test_rpn.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -import unittest -import torch - -from detectron2.config import get_cfg -from detectron2.export import scripting_with_instances -from detectron2.layers import ShapeSpec -from detectron2.modeling.backbone import build_backbone -from detectron2.modeling.proposal_generator import RPN, build_proposal_generator -from detectron2.modeling.proposal_generator.proposal_utils import ( - add_ground_truth_to_proposals, - find_top_rpn_proposals, -) -from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes -from detectron2.utils.events import EventStorage - -logger = logging.getLogger(__name__) - - -class RPNTest(unittest.TestCase): - def get_gt_and_features(self): - num_images = 2 - images_tensor = torch.rand(num_images, 20, 30) - image_sizes = [(10, 10), (20, 30)] - images = ImageList(images_tensor, image_sizes) - image_shape = (15, 15) - num_channels = 1024 - features = {"res4": torch.rand(num_images, num_channels, 1, 2)} - gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32) - gt_instances = Instances(image_shape) - gt_instances.gt_boxes = Boxes(gt_boxes) - return (gt_instances, features, images, image_sizes) - - def test_rpn(self): - torch.manual_seed(121) - cfg = get_cfg() - backbone = build_backbone(cfg) - proposal_generator = RPN(cfg, backbone.output_shape()) - (gt_instances, features, images, image_sizes) = self.get_gt_and_features() - with EventStorage(): # capture events in a new storage to discard them - proposals, proposal_losses = proposal_generator( - images, features, [gt_instances[0], gt_instances[1]] - ) - - expected_losses = { - "loss_rpn_cls": torch.tensor(0.08011703193), - "loss_rpn_loc": torch.tensor(0.101470276), - } - for name in expected_losses.keys(): - err_msg = "proposal_losses[{}] = {}, expected losses = {}".format( - name, proposal_losses[name], expected_losses[name] - ) - self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg) - - self.assertEqual(len(proposals), len(image_sizes)) - for proposal, im_size in zip(proposals, image_sizes): - self.assertEqual(proposal.image_size, im_size) - - expected_proposal_box = torch.tensor([[0, 0, 10, 10], [7.2702, 0, 10, 10]]) - expected_objectness_logit = torch.tensor([0.1596, -0.0007]) - self.assertTrue( - torch.allclose(proposals[0].proposal_boxes.tensor, expected_proposal_box, atol=1e-4) - ) - self.assertTrue( - torch.allclose(proposals[0].objectness_logits, expected_objectness_logit, atol=1e-4) - ) - - def verify_rpn(self, conv_dims, 
expected_conv_dims): - torch.manual_seed(121) - cfg = get_cfg() - cfg.MODEL.RPN.CONV_DIMS = conv_dims - backbone = build_backbone(cfg) - proposal_generator = RPN(cfg, backbone.output_shape()) - for k, conv in enumerate(proposal_generator.rpn_head.conv): - self.assertEqual(expected_conv_dims[k], conv.out_channels) - return proposal_generator - - def test_rpn_larger_num_convs(self): - conv_dims = [64, 64, 64, 64, 64] - proposal_generator = self.verify_rpn(conv_dims, conv_dims) - (gt_instances, features, images, image_sizes) = self.get_gt_and_features() - with EventStorage(): # capture events in a new storage to discard them - proposals, proposal_losses = proposal_generator( - images, features, [gt_instances[0], gt_instances[1]] - ) - expected_losses = { - "loss_rpn_cls": torch.tensor(0.08122821152), - "loss_rpn_loc": torch.tensor(0.10064548254), - } - for name in expected_losses.keys(): - err_msg = "proposal_losses[{}] = {}, expected losses = {}".format( - name, proposal_losses[name], expected_losses[name] - ) - self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg) - - def test_rpn_conv_dims_not_set(self): - conv_dims = [-1, -1, -1] - expected_conv_dims = [1024, 1024, 1024] - self.verify_rpn(conv_dims, expected_conv_dims) - - def test_rpn_scriptability(self): - cfg = get_cfg() - proposal_generator = RPN(cfg, {"res4": ShapeSpec(channels=1024, stride=16)}).eval() - num_images = 2 - images_tensor = torch.rand(num_images, 30, 40) - image_sizes = [(32, 32), (30, 40)] - images = ImageList(images_tensor, image_sizes) - features = {"res4": torch.rand(num_images, 1024, 1, 2)} - - fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor} - proposal_generator_ts = scripting_with_instances(proposal_generator, fields) - - proposals, _ = proposal_generator(images, features) - proposals_ts, _ = proposal_generator_ts(images, features) - - for proposal, proposal_ts in zip(proposals, proposals_ts): - self.assertEqual(proposal.image_size, proposal_ts.image_size) - self.assertTrue( - torch.equal(proposal.proposal_boxes.tensor, proposal_ts.proposal_boxes.tensor) - ) - self.assertTrue(torch.equal(proposal.objectness_logits, proposal_ts.objectness_logits)) - - def test_rrpn(self): - torch.manual_seed(121) - cfg = get_cfg() - cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN" - cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator" - cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]] - cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1]] - cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [[0, 60]] - cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1) - cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead" - backbone = build_backbone(cfg) - proposal_generator = build_proposal_generator(cfg, backbone.output_shape()) - num_images = 2 - images_tensor = torch.rand(num_images, 20, 30) - image_sizes = [(10, 10), (20, 30)] - images = ImageList(images_tensor, image_sizes) - image_shape = (15, 15) - num_channels = 1024 - features = {"res4": torch.rand(num_images, num_channels, 1, 2)} - gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32) - gt_instances = Instances(image_shape) - gt_instances.gt_boxes = RotatedBoxes(gt_boxes) - with EventStorage(): # capture events in a new storage to discard them - proposals, proposal_losses = proposal_generator( - images, features, [gt_instances[0], gt_instances[1]] - ) - - expected_losses = { - "loss_rpn_cls": torch.tensor(0.04291602224), - "loss_rpn_loc": torch.tensor(0.145077362), - } - for name in expected_losses.keys(): - err_msg = 
"proposal_losses[{}] = {}, expected losses = {}".format( - name, proposal_losses[name], expected_losses[name] - ) - self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg) - - expected_proposal_box = torch.tensor( - [ - [-1.77999556, 0.78155339, 68.04367828, 14.78156471, 60.59333801], - [13.82740974, -1.50282836, 34.67269897, 29.19676590, -3.81942749], - [8.10392570, -0.99071521, 145.39100647, 32.13126373, 3.67242432], - [5.00000000, 4.57370186, 10.00000000, 9.14740372, 0.89196777], - ] - ) - - expected_objectness_logit = torch.tensor([0.10924313, 0.09881870, 0.07649877, 0.05858029]) - - torch.set_printoptions(precision=8, sci_mode=False) - - self.assertEqual(len(proposals), len(image_sizes)) - - proposal = proposals[0] - # It seems that there's some randomness in the result across different machines: - # This test can be run on a local machine for 100 times with exactly the same result, - # However, a different machine might produce slightly different results, - # thus the atol here. - err_msg = "computed proposal boxes = {}, expected {}".format( - proposal.proposal_boxes.tensor, expected_proposal_box - ) - self.assertTrue( - torch.allclose(proposal.proposal_boxes.tensor[:4], expected_proposal_box, atol=1e-5), - err_msg, - ) - - err_msg = "computed objectness logits = {}, expected {}".format( - proposal.objectness_logits, expected_objectness_logit - ) - self.assertTrue( - torch.allclose(proposal.objectness_logits[:4], expected_objectness_logit, atol=1e-5), - err_msg, - ) - - def test_find_rpn_proposals_inf(self): - N, Hi, Wi, A = 3, 3, 3, 3 - proposals = [torch.rand(N, Hi * Wi * A, 4)] - pred_logits = [torch.rand(N, Hi * Wi * A)] - pred_logits[0][1][3:5].fill_(float("inf")) - find_top_rpn_proposals(proposals, pred_logits, [(10, 10)], 0.5, 1000, 1000, 0, False) - - def test_find_rpn_proposals_tracing(self): - N, Hi, Wi, A = 3, 50, 50, 9 - proposal = torch.rand(N, Hi * Wi * A, 4) - pred_logit = torch.rand(N, Hi * Wi * A) - - def func(proposal, logit, image_size): - r = find_top_rpn_proposals( - [proposal], [logit], [image_size], 0.7, 1000, 1000, 0, False - )[0] - size = r.image_size - if not isinstance(size, torch.Tensor): - size = torch.tensor(size) - return (size, r.proposal_boxes.tensor, r.objectness_logits) - - other_inputs = [] - # test that it generalizes to other shapes - for Hi, Wi, shp in [(30, 30, 60), (10, 10, 800)]: - other_inputs.append( - ( - torch.rand(N, Hi * Wi * A, 4), - torch.rand(N, Hi * Wi * A), - torch.tensor([shp, shp]), - ) - ) - torch.jit.trace( - func, (proposal, pred_logit, torch.tensor([100, 100])), check_inputs=other_inputs - ) - - def test_append_gt_to_proposal(self): - proposals = Instances( - (10, 10), - **{ - "proposal_boxes": Boxes(torch.empty((0, 4))), - "objectness_logits": torch.tensor([]), - "custom_attribute": torch.tensor([]), - } - ) - gt_boxes = Boxes(torch.tensor([[0, 0, 1, 1]])) - - self.assertRaises(AssertionError, add_ground_truth_to_proposals, [gt_boxes], [proposals]) - - gt_instances = Instances((10, 10)) - gt_instances.gt_boxes = gt_boxes - - self.assertRaises( - AssertionError, add_ground_truth_to_proposals, [gt_instances], [proposals] - ) - - gt_instances.custom_attribute = torch.tensor([1]) - gt_instances.custom_attribute2 = torch.tensor([1]) - new_proposals = add_ground_truth_to_proposals([gt_instances], [proposals])[0] - - self.assertEqual(new_proposals.custom_attribute[0], 1) - # new proposals should only include the attributes in proposals - self.assertRaises(AttributeError, lambda: 
new_proposals.custom_attribute2) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/TimVan1/nllb-translation-demo/flores200_codes.py b/spaces/TimVan1/nllb-translation-demo/flores200_codes.py deleted file mode 100644 index c6a3a8e1f8f10935dd0025b4b3264ef1056ca9f2..0000000000000000000000000000000000000000 --- a/spaces/TimVan1/nllb-translation-demo/flores200_codes.py +++ /dev/null @@ -1,211 +0,0 @@ -codes_as_string = '''Acehnese (Arabic script) ace_Arab -Acehnese (Latin script) ace_Latn -Mesopotamian Arabic acm_Arab -Ta’izzi-Adeni Arabic acq_Arab -Tunisian Arabic aeb_Arab -Afrikaans afr_Latn -South Levantine Arabic ajp_Arab -Akan aka_Latn -Amharic amh_Ethi -North Levantine Arabic apc_Arab -Modern Standard Arabic arb_Arab -Modern Standard Arabic (Romanized) arb_Latn -Najdi Arabic ars_Arab -Moroccan Arabic ary_Arab -Egyptian Arabic arz_Arab -Assamese asm_Beng -Asturian ast_Latn -Awadhi awa_Deva -Central Aymara ayr_Latn -South Azerbaijani azb_Arab -North Azerbaijani azj_Latn -Bashkir bak_Cyrl -Bambara bam_Latn -Balinese ban_Latn -Belarusian bel_Cyrl -Bemba bem_Latn -Bengali ben_Beng -Bhojpuri bho_Deva -Banjar (Arabic script) bjn_Arab -Banjar (Latin script) bjn_Latn -Standard Tibetan bod_Tibt -Bosnian bos_Latn -Buginese bug_Latn -Bulgarian bul_Cyrl -Catalan cat_Latn -Cebuano ceb_Latn -Czech ces_Latn -Chokwe cjk_Latn -Central Kurdish ckb_Arab -Crimean Tatar crh_Latn -Welsh cym_Latn -Danish dan_Latn -German deu_Latn -Southwestern Dinka dik_Latn -Dyula dyu_Latn -Dzongkha dzo_Tibt -Greek ell_Grek -English eng_Latn -Esperanto epo_Latn -Estonian est_Latn -Basque eus_Latn -Ewe ewe_Latn -Faroese fao_Latn -Fijian fij_Latn -Finnish fin_Latn -Fon fon_Latn -French fra_Latn -Friulian fur_Latn -Nigerian Fulfulde fuv_Latn -Scottish Gaelic gla_Latn -Irish gle_Latn -Galician glg_Latn -Guarani grn_Latn -Gujarati guj_Gujr -Haitian Creole hat_Latn -Hausa hau_Latn -Hebrew heb_Hebr -Hindi hin_Deva -Chhattisgarhi hne_Deva -Croatian hrv_Latn -Hungarian hun_Latn -Armenian hye_Armn -Igbo ibo_Latn -Ilocano ilo_Latn -Indonesian ind_Latn -Icelandic isl_Latn -Italian ita_Latn -Javanese jav_Latn -Japanese jpn_Jpan -Kabyle kab_Latn -Jingpho kac_Latn -Kamba kam_Latn -Kannada kan_Knda -Kashmiri (Arabic script) kas_Arab -Kashmiri (Devanagari script) kas_Deva -Georgian kat_Geor -Central Kanuri (Arabic script) knc_Arab -Central Kanuri (Latin script) knc_Latn -Kazakh kaz_Cyrl -Kabiyè kbp_Latn -Kabuverdianu kea_Latn -Khmer khm_Khmr -Kikuyu kik_Latn -Kinyarwanda kin_Latn -Kyrgyz kir_Cyrl -Kimbundu kmb_Latn -Northern Kurdish kmr_Latn -Kikongo kon_Latn -Korean kor_Hang -Lao lao_Laoo -Ligurian lij_Latn -Limburgish lim_Latn -Lingala lin_Latn -Lithuanian lit_Latn -Lombard lmo_Latn -Latgalian ltg_Latn -Luxembourgish ltz_Latn -Luba-Kasai lua_Latn -Ganda lug_Latn -Luo luo_Latn -Mizo lus_Latn -Standard Latvian lvs_Latn -Magahi mag_Deva -Maithili mai_Deva -Malayalam mal_Mlym -Marathi mar_Deva -Minangkabau (Arabic script) min_Arab -Minangkabau (Latin script) min_Latn -Macedonian mkd_Cyrl -Plateau Malagasy plt_Latn -Maltese mlt_Latn -Meitei (Bengali script) mni_Beng -Halh Mongolian khk_Cyrl -Mossi mos_Latn -Maori mri_Latn -Burmese mya_Mymr -Dutch nld_Latn -Norwegian Nynorsk nno_Latn -Norwegian Bokmål nob_Latn -Nepali npi_Deva -Northern Sotho nso_Latn -Nuer nus_Latn -Nyanja nya_Latn -Occitan oci_Latn -West Central Oromo gaz_Latn -Odia ory_Orya -Pangasinan pag_Latn -Eastern Panjabi pan_Guru -Papiamento pap_Latn -Western Persian pes_Arab -Polish pol_Latn -Portuguese por_Latn -Dari prs_Arab -Southern Pashto pbt_Arab -Ayacucho 
Quechua quy_Latn -Romanian ron_Latn -Rundi run_Latn -Russian rus_Cyrl -Sango sag_Latn -Sanskrit san_Deva -Santali sat_Olck -Sicilian scn_Latn -Shan shn_Mymr -Sinhala sin_Sinh -Slovak slk_Latn -Slovenian slv_Latn -Samoan smo_Latn -Shona sna_Latn -Sindhi snd_Arab -Somali som_Latn -Southern Sotho sot_Latn -Spanish spa_Latn -Tosk Albanian als_Latn -Sardinian srd_Latn -Serbian srp_Cyrl -Swati ssw_Latn -Sundanese sun_Latn -Swedish swe_Latn -Swahili swh_Latn -Silesian szl_Latn -Tamil tam_Taml -Tatar tat_Cyrl -Telugu tel_Telu -Tajik tgk_Cyrl -Tagalog tgl_Latn -Thai tha_Thai -Tigrinya tir_Ethi -Tamasheq (Latin script) taq_Latn -Tamasheq (Tifinagh script) taq_Tfng -Tok Pisin tpi_Latn -Tswana tsn_Latn -Tsonga tso_Latn -Turkmen tuk_Latn -Tumbuka tum_Latn -Turkish tur_Latn -Twi twi_Latn -Central Atlas Tamazight tzm_Tfng -Uyghur uig_Arab -Ukrainian ukr_Cyrl -Umbundu umb_Latn -Urdu urd_Arab -Northern Uzbek uzn_Latn -Venetian vec_Latn -Vietnamese vie_Latn -Waray war_Latn -Wolof wol_Latn -Xhosa xho_Latn -Eastern Yiddish ydd_Hebr -Yoruba yor_Latn -Yue Chinese yue_Hant -Chinese (Simplified) zho_Hans -Chinese (Traditional) zho_Hant -Standard Malay zsm_Latn -Zulu zul_Latn''' - -codes_as_string = codes_as_string.split('\n') - -flores_codes = {} -for code in codes_as_string: - lang, lang_code = code.split('\t') - flores_codes[lang] = lang_code diff --git a/spaces/UMich-siads699-fa22-spotamood/spotamood/songs_rec.py b/spaces/UMich-siads699-fa22-spotamood/spotamood/songs_rec.py deleted file mode 100644 index f80486773518b2e098ac0acc031de33c32606567..0000000000000000000000000000000000000000 --- a/spaces/UMich-siads699-fa22-spotamood/spotamood/songs_rec.py +++ /dev/null @@ -1,103 +0,0 @@ -# Import packages -# Data manipulation -import re -import pickle -import math -import numpy as np -import pandas as pd -import nltk -#first time usage: download addtional packages form nltk first: -#nltk.download() -from nltk.tokenize import word_tokenize -from sentence_transformers import SentenceTransformer, util - -# Ranking Generation - -# Helper functions to main ranking function - -# Get closest lyrics lines matches from user text input -def text_get_similar_lyrics_lines(user_text_input, embeddings, arr_lyrics_idx, model): - input_emb = model.encode(user_text_input, convert_to_tensor=True) - res_cos_sim = util.semantic_search(input_emb, embeddings, score_function=util.cos_sim, top_k=100) - # Convert results and mapped lyrics id as pd dataframe - res_df = pd.DataFrame(res_cos_sim[0]) - res_df.rename(columns = {'corpus_id':'lyrics_id'}, inplace = True) - res_df['lyrics_line'] = arr_lyrics_idx[res_df['lyrics_id']] - return res_df - -# For invert indexing // Look up ids of corresponding songs -def lyrics_id_mapping(res_df, arr_lyrics_idx): - arr_lyrics_id = res_df['lyrics_id'].to_numpy() - arr_idx = arr_lyrics_id.astype(int) - arr_song_row_idx = arr_lyrics_idx[arr_idx] - res_df['song_idx'] = arr_song_row_idx - return res_df - -# Suppress utterances which have low similarity scores -def score_low_sim_weighting(df, threshold = 0.6, weight_low_sim = 0.5): - df['score_weighted'] = df['score'].apply(lambda x: x * weight_low_sim if x < threshold else x) - return df - -# Re-rank on songs level based on the sum of lyrics line scores -def songs_ranking(df_results_lyrics_mapped): - res = df_results_lyrics_mapped.groupby('song_idx')['score_weighted'].sum() - res = res.sort_values(ascending=False) - return res - -# Combine songs information to ranked songs -def combine_songs_info(s_songs_ranking, sample_artists_set, valence_range, 
results_limit = 10): - df_songs_candidates = sample_artists_set.filter(items = s_songs_ranking.index, axis=0) - df_songs_candidates['score'] = s_songs_ranking - df_songs_candidates['song_idx'] = s_songs_ranking - valence_min = valence_range[0] - valence_max = valence_range[1] - df_songs_candidates = df_songs_candidates[df_songs_candidates['valence'].between(valence_min, valence_max)] - res_df = df_songs_candidates[['artist', 'title', 'score']][:results_limit] - return res_df - -# Helper function to support getting songs/ lyrics results - -# Look up relevant lyrics lines an their similarity scores -def lyrics_scores_lookup(song_id, df_results_lyrics_mapped): - res = df_results_lyrics_mapped[df_results_lyrics_mapped['song_idx'] == song_id][['lyrics_line', 'score']] - res = res.sort_values(by=['score'], ascending=False) - return res - -# Generate output on both songs and lyrics level, as a list of dictionaries -def similar_songs_lyrics_ranked(df_results_songs, df_results_lyrics_mapped): - - result_list = [] - - for song_id in df_results_songs.index: - song_title = df_results_songs['title'].loc[song_id] - song_artist = df_results_songs['artist'].loc[song_id] - song_score = df_results_songs['score'].loc[song_id] - song_id = song_id - df_lyrics_scores = lyrics_scores_lookup(song_id, df_results_lyrics_mapped) - # create tuple - t_lyrics = zip(df_lyrics_scores['lyrics_line'], df_lyrics_scores['score']) - # create dictionary for the unique lyrics lines - d_lyrics = dict(zip(df_lyrics_scores['lyrics_line'], df_lyrics_scores['score'])) - dict_object = {"song_id": song_id, "artist":song_artist, "song title":song_title, "song_score":song_score, "lyrics_scores":t_lyrics} - result_list.append(dict_object) - - return result_list - -# Overall function to generate songs ranking based on lyrics lines semantic textual similarity -def similar_songs_ranked(user_input, embeddings, sample_artists_set, lyrics_set, arr_song_idx, valence_range, model): - df_results_lyrics = text_get_similar_lyrics_lines(user_input, embeddings, lyrics_set, model) - df_results_lyrics_mapped = lyrics_id_mapping(df_results_lyrics, arr_song_idx) - df_results_lyrics_mapped = score_low_sim_weighting(df_results_lyrics_mapped) - s_songs_ranking = songs_ranking(df_results_lyrics_mapped) - df_results_songs = combine_songs_info(s_songs_ranking, sample_artists_set, valence_range) - return df_results_songs, df_results_lyrics_mapped - -# Main Function to return songs/lyrics ranking -def main(user_input, embeddings, sample_artists_set, arr_lyrics_idx, arr_song_idx, valence_range, model): - df_results_songs, df_results_lyrics_mapped = similar_songs_ranked(user_input, embeddings, sample_artists_set, arr_lyrics_idx, arr_song_idx, valence_range, model) - result = similar_songs_lyrics_ranked(df_results_songs, df_results_lyrics_mapped) - - return result - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/spaces/Wauplin/bloomz.cpp-converter/README.md b/spaces/Wauplin/bloomz.cpp-converter/README.md deleted file mode 100644 index c40110e8027dcb4ab3a42f8f8dd7299fb9a9e793..0000000000000000000000000000000000000000 --- a/spaces/Wauplin/bloomz.cpp-converter/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Bloom.cpp Converter -emoji: 👁 -colorFrom: green -colorTo: pink -sdk: docker -app_port: 7860 -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Wrathless/Dkrotzer-MusicalMagic/CONTRIBUTING.md 
b/spaces/Wrathless/Dkrotzer-MusicalMagic/CONTRIBUTING.md deleted file mode 100644 index 55b99140204d785d572ada9761dd77f302ae31c6..0000000000000000000000000000000000000000 --- a/spaces/Wrathless/Dkrotzer-MusicalMagic/CONTRIBUTING.md +++ /dev/null @@ -1,35 +0,0 @@ -# Contributing to Audiocraft - -We want to make contributing to this project as easy and transparent as -possible. - -## Pull Requests - -Audiocraft is the implementation of a research paper. -Therefore, we do not plan on accepting many pull requests for new features. -We certainly welcome them for bug fixes. - -1. Fork the repo and create your branch from `main`. -2. If you've added code that should be tested, add tests. -3. If you've changed APIs, update the documentation. -4. Ensure the test suite passes. -5. Make sure your code lints. -6. If you haven't already, complete the Contributor License Agreement ("CLA"). - -## Contributor License Agreement ("CLA") -In order to accept your pull request, we need you to submit a CLA. You only need -to do this once to work on any of Meta's open source projects. - -Complete your CLA here: - -## Issues -We use GitHub issues to track public bugs. Please ensure your description is -clear and has sufficient instructions to be able to reproduce the issue. - -Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe -disclosure of security bugs. In those cases, please go through the process -outlined on that page and do not file a public issue. - -## License -By contributing to encodec, you agree that your contributions will be licensed -under the LICENSE file in the root directory of this source tree. diff --git a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/train.py b/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/train.py deleted file mode 100644 index bb418ed32473bff1d918b5821ce29deaa69db3d1..0000000000000000000000000000000000000000 --- a/spaces/XS-1/BW_IMAGE_VIDEO_COLORIZER/fastai/train.py +++ /dev/null @@ -1,228 +0,0 @@ -"Provides advanced training extensions to `fastai.basic_train`. Includes half-precision, learning rate finder, mixup, and one-cycle" -from .torch_core import * -from .callbacks import * -from .basic_data import * -from .basic_train import * - -__all__ = ['BnFreeze', 'GradientClipping', 'ShowGraph', 'Interpretation', 'ClassificationInterpretation', 'MultiLabelClassificationInterpretation', - 'fit_one_cycle', 'lr_find', 'one_cycle_scheduler', 'to_fp16', 'to_fp32', 'mixup', 'AccumulateScheduler'] - -def one_cycle_scheduler(lr_max:float, **kwargs:Any)->OneCycleScheduler: - "Instantiate a `OneCycleScheduler` with `lr_max`." - return partial(OneCycleScheduler, lr_max=lr_max, **kwargs) - -def fit_one_cycle(learn:Learner, cyc_len:int, max_lr:Union[Floats,slice]=defaults.lr, - moms:Tuple[float,float]=(0.95,0.85), div_factor:float=25., pct_start:float=0.3, final_div:float=None, - wd:float=None, callbacks:Optional[CallbackList]=None, tot_epochs:int=None, start_epoch:int=None, - batch_multiplier:int=1)->None: - "Fit a model following the 1cycle policy." 
- max_lr = learn.lr_range(max_lr) - callbacks = listify(callbacks) - callbacks.append(OneCycleScheduler(learn, max_lr, moms=moms, div_factor=div_factor, pct_start=pct_start, - final_div=final_div, tot_epochs=tot_epochs, start_epoch=start_epoch)) - learn.fit(cyc_len, max_lr, wd=wd, callbacks=callbacks, batch_multiplier=batch_multiplier) - -def lr_find(learn:Learner, start_lr:Floats=1e-7, end_lr:Floats=10, num_it:int=100, stop_div:bool=True, wd:float=None, - batch_multiplier:int=1): - "Explore lr from `start_lr` to `end_lr` over `num_it` iterations in `learn`. If `stop_div`, stops when loss diverges." - start_lr = learn.lr_range(start_lr) - start_lr = np.array(start_lr) if is_listy(start_lr) else start_lr - end_lr = learn.lr_range(end_lr) - end_lr = np.array(end_lr) if is_listy(end_lr) else end_lr - cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div) - epochs = int(np.ceil(num_it/len(learn.data.train_dl))) - learn.fit(epochs, start_lr, callbacks=[cb], wd=wd, batch_multiplier=batch_multiplier) - -def to_fp16(learn:Learner, loss_scale:float=None, max_noskip:int=1000, dynamic:bool=True, clip:float=None, - flat_master:bool=False, max_scale:float=2**24)->Learner: - "Put `learn` in FP16 precision mode." - learn.to_fp32() - learn.model = model2half(learn.model) - learn.data.add_tfm(batch_to_half) - learn.mp_cb = MixedPrecision(learn, loss_scale=loss_scale, max_noskip=max_noskip, dynamic=dynamic, clip=clip, - flat_master=flat_master, max_scale=max_scale) - learn.callbacks.append(learn.mp_cb) - return learn - -def to_fp32(learn:Learner): - "Put `learn` back to FP32 precision mode." - learn.data.remove_tfm(batch_to_half) - for cb in learn.callbacks: - if isinstance(cb, MixedPrecision): learn.callbacks.remove(cb) - learn.model = learn.model.float() - return learn - -def mixup(learn:Learner, alpha:float=0.4, stack_x:bool=False, stack_y:bool=True) -> Learner: - "Add mixup https://arxiv.org/abs/1710.09412 to `learn`." - learn.callback_fns.append(partial(MixUpCallback, alpha=alpha, stack_x=stack_x, stack_y=stack_y)) - return learn - -Learner.fit_one_cycle = fit_one_cycle -Learner.lr_find = lr_find -Learner.to_fp16 = to_fp16 -Learner.to_fp32 = to_fp32 -Learner.mixup = mixup - -class ShowGraph(LearnerCallback): - "Update a graph of learner stats and metrics after each epoch." - def on_epoch_end(self, n_epochs:int, last_metrics:MetricsList, **kwargs)->bool: - "If we have `last_metrics` plot them in our pbar graph" - if last_metrics is not None and last_metrics[0] is not None: - rec = self.learn.recorder - iters = range_of(rec.losses) - val_iter = np.array(rec.nb_batches).cumsum() - x_bounds = (0, (n_epochs - len(rec.nb_batches)) * rec.nb_batches[-1] + len(rec.losses)) - y_bounds = (0, max((max(Tensor(rec.losses)), max(Tensor(rec.val_losses))))) - rec.pbar.update_graph([(iters, rec.losses), (val_iter, rec.val_losses)], x_bounds, y_bounds) - return {} - -class BnFreeze(LearnerCallback): - "Freeze moving average statistics in all non-trainable batchnorm layers." - def on_epoch_begin(self, **kwargs:Any)->None: - "Put bn layers in eval mode just after `model.train()`." - set_bn_eval(self.learn.model) - -class GradientClipping(LearnerCallback): - "Gradient clipping during training." - def __init__(self, learn:Learner, clip:float = 0.): - super().__init__(learn) - self.clip = clip - - def on_backward_end(self, **kwargs): - "Clip the gradient before the optimizer step." 
- if self.clip: nn.utils.clip_grad_norm_(self.learn.model.parameters(), self.clip) - -def clip_grad(learn:Learner, clip:float=0.1)->Learner: - "Add gradient clipping of `clip` during training." - learn.callback_fns.append(partial(GradientClipping, clip=clip)) - return learn -Learner.clip_grad = clip_grad - -class AccumulateScheduler(LearnerCallback): - "Does accumlated step every nth step by accumulating gradients" - - def __init__(self, learn:Learner, n_step:int = 1, drop_last:bool = False): - super().__init__(learn) - self.n_step,self.drop_last = n_step,drop_last - - def on_train_begin(self, **kwargs): - "check if loss is reduction" - if hasattr(self.loss_func, "reduction") and (self.loss_func.reduction != "sum"): - warn("For better gradients consider 'reduction=sum'") - - def on_epoch_begin(self, **kwargs): - "init samples and batches, change optimizer" - self.acc_samples, self.acc_batches = 0., 0. - - def on_batch_begin(self, last_input, last_target, **kwargs): - "accumulate samples and batches" - self.acc_samples += last_input.shape[0] - self.acc_batches += 1 - - def on_backward_end(self, **kwargs): - "accumulated step and reset samples, True will result in no stepping" - if (self.acc_batches % self.n_step) == 0: - for p in (self.learn.model.parameters()): - if p.requires_grad: p.grad.div_(self.acc_samples) - self.acc_samples = 0 - else: return {'skip_step':True, 'skip_zero':True} - - def on_epoch_end(self, **kwargs): - "step the rest of the accumulated grads if not perfectly divisible" - for p in (self.learn.model.parameters()): - if p.requires_grad: p.grad.div_(self.acc_samples) - if not self.drop_last: self.learn.opt.step() - self.learn.opt.zero_grad() - - -class Interpretation(): - "Interpretation base class, can be inherited for task specific Interpretation classes" - def __init__(self, learn:Learner, preds:Tensor, y_true:Tensor, losses:Tensor, ds_type:DatasetType=DatasetType.Valid): - self.data,self.preds,self.y_true,self.losses,self.ds_type, self.learn = \ - learn.data,preds,y_true,losses,ds_type,learn - self.ds = (self.data.train_ds if ds_type == DatasetType.Train else - self.data.test_ds if ds_type == DatasetType.Test else - self.data.valid_ds if ds_type == DatasetType.Valid else - self.data.single_ds if ds_type == DatasetType.Single else - self.data.fix_ds) - - @classmethod - def from_learner(cls, learn: Learner, ds_type:DatasetType=DatasetType.Valid, activ:nn.Module=None): - "Gets preds, y_true, losses to construct base class from a learner" - preds_res = learn.get_preds(ds_type=ds_type, activ=activ, with_loss=True) - return cls(learn, *preds_res) - - def top_losses(self, k:int=None, largest=True): - "`k` largest(/smallest) losses and indexes, defaulting to all losses (sorted by `largest`)." - return self.losses.topk(ifnone(k, len(self.losses)), largest=largest) - - # def top_scores(self, metric:Callable=None, k:int=None, largest=True): - # "`k` largest(/smallest) metric scores and indexes, defaulting to all scores (sorted by `largest`)." - # self.scores = metric(self.preds, self.y_true) - # return self.scores.topk(ifnone(k, len(self.scores)), largest=largest) - - -class ClassificationInterpretation(Interpretation): - "Interpretation methods for classification models." 
- def __init__(self, learn:Learner, preds:Tensor, y_true:Tensor, losses:Tensor, ds_type:DatasetType=DatasetType.Valid): - super(ClassificationInterpretation, self).__init__(learn,preds,y_true,losses,ds_type) - self.pred_class = self.preds.argmax(dim=1) - - def confusion_matrix(self, slice_size:int=1): - "Confusion matrix as an `np.ndarray`." - x=torch.arange(0,self.data.c) - if slice_size is None: cm = ((self.pred_class==x[:,None]) & (self.y_true==x[:,None,None])).sum(2) - else: - cm = torch.zeros(self.data.c, self.data.c, dtype=x.dtype) - for i in range(0, self.y_true.shape[0], slice_size): - cm_slice = ((self.pred_class[i:i+slice_size]==x[:,None]) - & (self.y_true[i:i+slice_size]==x[:,None,None])).sum(2) - torch.add(cm, cm_slice, out=cm) - return to_np(cm) - - def plot_confusion_matrix(self, normalize:bool=False, title:str='Confusion matrix', cmap:Any="Blues", slice_size:int=1, - norm_dec:int=2, plot_txt:bool=True, return_fig:bool=None, **kwargs)->Optional[plt.Figure]: - "Plot the confusion matrix, with `title` and using `cmap`." - # This function is mainly copied from the sklearn docs - cm = self.confusion_matrix(slice_size=slice_size) - if normalize: cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] - fig = plt.figure(**kwargs) - plt.imshow(cm, interpolation='nearest', cmap=cmap) - plt.title(title) - tick_marks = np.arange(self.data.c) - plt.xticks(tick_marks, self.data.y.classes, rotation=90) - plt.yticks(tick_marks, self.data.y.classes, rotation=0) - - if plot_txt: - thresh = cm.max() / 2. - for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])): - coeff = f'{cm[i, j]:.{norm_dec}f}' if normalize else f'{cm[i, j]}' - plt.text(j, i, coeff, horizontalalignment="center", verticalalignment="center", color="white" if cm[i, j] > thresh else "black") - - plt.tight_layout() - plt.ylabel('Actual') - plt.xlabel('Predicted') - plt.grid(False) - if ifnone(return_fig, defaults.return_fig): return fig - - def most_confused(self, min_val:int=1, slice_size:int=1)->Collection[Tuple[str,str,int]]: - "Sorted descending list of largest non-diagonal entries of confusion matrix, presented as actual, predicted, number of occurrences." - cm = self.confusion_matrix(slice_size=slice_size) - np.fill_diagonal(cm, 0) - res = [(self.data.classes[i],self.data.classes[j],cm[i,j]) - for i,j in zip(*np.where(cm>=min_val))] - return sorted(res, key=itemgetter(2), reverse=True) - - -def _learner_interpret(learn:Learner, ds_type:DatasetType=DatasetType.Valid): - "Create a `ClassificationInterpretation` object from `learner` on `ds_type` with `tta`." - return ClassificationInterpretation.from_learner(learn, ds_type=ds_type) -Learner.interpret = _learner_interpret - -class MultiLabelClassificationInterpretation(Interpretation): - "Interpretation methods for classification models." 
- def __init__(self, learn:Learner, preds:Tensor, y_true:Tensor, losses:Tensor, ds_type:DatasetType=DatasetType.Valid, - sigmoid:bool=True, thresh:float=0.3): - raise NotImplementedError - super(MultiLabelClassificationInterpretation, self).__init__(learn,preds,y_true,losses,ds_type) - self.pred_class = self.preds.sigmoid(dim=1)>thresh if sigmoid else self.preds>thresh diff --git a/spaces/XzJosh/ShanBao-Bert-VITS2/text/cleaner.py b/spaces/XzJosh/ShanBao-Bert-VITS2/text/cleaner.py deleted file mode 100644 index 64bd5f7296f66c94f3a335666c53706bb5fe5b39..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/ShanBao-Bert-VITS2/text/cleaner.py +++ /dev/null @@ -1,27 +0,0 @@ -from text import chinese, cleaned_text_to_sequence - - -language_module_map = { - 'ZH': chinese -} - - -def clean_text(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - return norm_text, phones, tones, word2ph - -def clean_text_bert(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - bert = language_module.get_bert_feature(norm_text, word2ph) - return phones, tones, bert - -def text_to_sequence(text, language): - norm_text, phones, tones, word2ph = clean_text(text, language) - return cleaned_text_to_sequence(phones, tones, language) - -if __name__ == '__main__': - pass diff --git a/spaces/Y-T-G/Blur-Anything/tracker/model/resnet.py b/spaces/Y-T-G/Blur-Anything/tracker/model/resnet.py deleted file mode 100644 index 3fb45e66f76abd64301b1d03ef35c3512696ac4d..0000000000000000000000000000000000000000 --- a/spaces/Y-T-G/Blur-Anything/tracker/model/resnet.py +++ /dev/null @@ -1,191 +0,0 @@ -""" -resnet.py - A modified ResNet structure -We append extra channels to the first conv by some network surgery -""" - -from collections import OrderedDict -import math - -import torch -import torch.nn as nn -from torch.utils import model_zoo - - -def load_weights_add_extra_dim(target, source_state, extra_dim=1): - new_dict = OrderedDict() - - for k1, v1 in target.state_dict().items(): - if not "num_batches_tracked" in k1: - if k1 in source_state: - tar_v = source_state[k1] - - if v1.shape != tar_v.shape: - # Init the new segmentation channel with zeros - # print(v1.shape, tar_v.shape) - c, _, w, h = v1.shape - pads = torch.zeros((c, extra_dim, w, h), device=tar_v.device) - nn.init.orthogonal_(pads) - tar_v = torch.cat([tar_v, pads], 1) - - new_dict[k1] = tar_v - - target.load_state_dict(new_dict) - - -model_urls = { - "resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth", - "resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth", -} - - -def conv3x3(in_planes, out_planes, stride=1, dilation=1): - return nn.Conv2d( - in_planes, - out_planes, - kernel_size=3, - stride=stride, - padding=dilation, - dilation=dilation, - bias=False, - ) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1): - super(BasicBlock, self).__init__() - self.conv1 = conv3x3(inplanes, planes, stride=stride, dilation=dilation) - self.bn1 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes, stride=1, dilation=dilation) - self.bn2 = nn.BatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = 
self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1): - super(Bottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) - self.bn1 = nn.BatchNorm2d(planes) - self.conv2 = nn.Conv2d( - planes, - planes, - kernel_size=3, - stride=stride, - dilation=dilation, - padding=dilation, - bias=False, - ) - self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - def __init__(self, block, layers=(3, 4, 23, 3), extra_dim=0): - self.inplanes = 64 - super(ResNet, self).__init__() - self.conv1 = nn.Conv2d( - 3 + extra_dim, 64, kernel_size=7, stride=2, padding=3, bias=False - ) - self.bn1 = nn.BatchNorm2d(64) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = self._make_layer(block, 64, layers[0]) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2) - self.layer3 = self._make_layer(block, 256, layers[2], stride=2) - self.layer4 = self._make_layer(block, 512, layers[3], stride=2) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2.0 / n)) - elif isinstance(m, nn.BatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def _make_layer(self, block, planes, blocks, stride=1, dilation=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d( - self.inplanes, - planes * block.expansion, - kernel_size=1, - stride=stride, - bias=False, - ), - nn.BatchNorm2d(planes * block.expansion), - ) - - layers = [block(self.inplanes, planes, stride, downsample)] - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes, dilation=dilation)) - - return nn.Sequential(*layers) - - -def resnet18(pretrained=True, extra_dim=0): - model = ResNet(BasicBlock, [2, 2, 2, 2], extra_dim) - if pretrained: - load_weights_add_extra_dim( - model, model_zoo.load_url(model_urls["resnet18"]), extra_dim - ) - return model - - -def resnet50(pretrained=True, extra_dim=0): - model = ResNet(Bottleneck, [3, 4, 6, 3], extra_dim) - if pretrained: - load_weights_add_extra_dim( - model, model_zoo.load_url(model_urls["resnet50"]), extra_dim - ) - return model diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_50ep_LSJ.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_50ep_LSJ.py deleted file mode 100644 index 2ca1ede262cf5c37a3a54778458c74aff1479411..0000000000000000000000000000000000000000 --- 
a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/configs/new_baselines/mask_rcnn_R_50_FPN_50ep_LSJ.py +++ /dev/null @@ -1,14 +0,0 @@ -from .mask_rcnn_R_50_FPN_100ep_LSJ import ( - dataloader, - lr_multiplier, - model, - optimizer, - train, -) - -train.max_iter //= 2 # 100ep -> 50ep - -lr_multiplier.scheduler.milestones = [ - milestone // 2 for milestone in lr_multiplier.scheduler.milestones -] -lr_multiplier.scheduler.num_updates = train.max_iter diff --git a/spaces/YlcldKlns/bing/src/components/ui/dialog.tsx b/spaces/YlcldKlns/bing/src/components/ui/dialog.tsx deleted file mode 100644 index 925e77fe7858fb218b5115b4e225174a886e0f02..0000000000000000000000000000000000000000 --- a/spaces/YlcldKlns/bing/src/components/ui/dialog.tsx +++ /dev/null @@ -1,128 +0,0 @@ -'use client' - -import * as React from 'react' -import * as DialogPrimitive from '@radix-ui/react-dialog' - -import { cn } from '@/lib/utils' -import { IconClose } from '@/components/ui/icons' - -const Dialog = DialogPrimitive.Root - -const DialogTrigger = DialogPrimitive.Trigger - -const DialogPortal = ({ - className, - children, - ...props -}: DialogPrimitive.DialogPortalProps) => ( - -
      - {children} -
      -
      -) -DialogPortal.displayName = DialogPrimitive.Portal.displayName - -const DialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogOverlay.displayName = DialogPrimitive.Overlay.displayName - -const DialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - {children} - - - Close - - - -)) -DialogContent.displayName = DialogPrimitive.Content.displayName - -const DialogHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
      -) -DialogHeader.displayName = 'DialogHeader' - -const DialogFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
      -) -DialogFooter.displayName = 'DialogFooter' - -const DialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogTitle.displayName = DialogPrimitive.Title.displayName - -const DialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogDescription.displayName = DialogPrimitive.Description.displayName - -export { - Dialog, - DialogTrigger, - DialogContent, - DialogHeader, - DialogFooter, - DialogTitle, - DialogDescription -} diff --git a/spaces/Yuliang/ECON/lib/torch_utils/ops/fused_bias_act.cpp b/spaces/Yuliang/ECON/lib/torch_utils/ops/fused_bias_act.cpp deleted file mode 100644 index 02be898f970bcc8ea297867fcaa4e71b24b3d949..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ECON/lib/torch_utils/ops/fused_bias_act.cpp +++ /dev/null @@ -1,21 +0,0 @@ -#include - - -torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, - int act, int grad, float alpha, float scale); - -#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) - -torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, - int act, int grad, float alpha, float scale) { - CHECK_CUDA(input); - CHECK_CUDA(bias); - - return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)"); -} \ No newline at end of file diff --git a/spaces/YuxinJ/Scenimefy/Scenimefy/data/image_folder.py b/spaces/YuxinJ/Scenimefy/Scenimefy/data/image_folder.py deleted file mode 100644 index 2a137d32459367701bcaba3664eb381051a41d88..0000000000000000000000000000000000000000 --- a/spaces/YuxinJ/Scenimefy/Scenimefy/data/image_folder.py +++ /dev/null @@ -1,66 +0,0 @@ -"""A modified image folder class - -We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) -so that this class can load images from both current directory and its subdirectories. 
-""" - -import torch.utils.data as data - -from PIL import Image -import os -import os.path - -IMG_EXTENSIONS = [ - '.jpg', '.JPG', '.jpeg', '.JPEG', - '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', - '.tif', '.TIF', '.tiff', '.TIFF', -] - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) - - -def make_dataset(dir, max_dataset_size=float("inf")): - images = [] - assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir - - for root, _, fnames in sorted(os.walk(dir, followlinks=True)): - for fname in fnames: - if is_image_file(fname): - path = os.path.join(root, fname) - images.append(path) - return images[:min(max_dataset_size, len(images))] - - -def default_loader(path): - return Image.open(path).convert('RGB') - - -class ImageFolder(data.Dataset): - - def __init__(self, root, transform=None, return_paths=False, - loader=default_loader): - imgs = make_dataset(root) - if len(imgs) == 0: - raise(RuntimeError("Found 0 images in: " + root + "\n" - "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) - - self.root = root - self.imgs = imgs - self.transform = transform - self.return_paths = return_paths - self.loader = loader - - def __getitem__(self, index): - path = self.imgs[index] - img = self.loader(path) - if self.transform is not None: - img = self.transform(img) - if self.return_paths: - return img, path - else: - return img - - def __len__(self): - return len(self.imgs) diff --git a/spaces/Zakia/chest_x_ray_pneumonia_predictor/app.py b/spaces/Zakia/chest_x_ray_pneumonia_predictor/app.py deleted file mode 100644 index 477cdaa6eecdf6b02a1d3443a30ab722d2015f87..0000000000000000000000000000000000000000 --- a/spaces/Zakia/chest_x_ray_pneumonia_predictor/app.py +++ /dev/null @@ -1,67 +0,0 @@ -# Bismillahir Rahmaanir Raheem -# Almadadh Ya Gause Radi Allahu Ta'alah Anh - Ameen - - -from fastai.vision.all import * -import gradio as gr - - -def is_pneumonia(x): - return (x.find('virus')!=-1 or x.find('bacteria')!=-1) - - - -# load the trained fast ai model for predictions -learn = load_learner('model.pkl') - - -# define the function to call -categories = ('Pneumonia', 'Normal') - -def predict(img): - pred, idx, probs = learn.predict(img) - return dict(zip(categories, map(float, probs))) - - -title = "Pediatric Pneumonia Chest X-Ray Predictor" - -description = "A pediatric pneumonia chest x-ray predictor model trained on the chest-xray-pneumonia dataset using ResNet18 via fast.ai. The dataset is from: Chest X-Ray Images (Pneumonia) and the associated scientific journal paper is Identifying Medical Diagnoses and Treatable Diseases by Image-Based Deep Learning. The accuracy of the model is: 81.25%" - - -article = "

      Pediatric Pneumonia Chest X-Ray Predictor. Zakia Salod. 2022.

      " - - -image = gr.inputs.Image(shape=(512, 512)) -label = gr.outputs.Label() -examples = [ - ['person1_virus_6.jpeg'], - ['NORMAL2-IM-0285-0001.jpeg'], - ['person82_bacteria_404.jpeg'], - ['NORMAL2-IM-0373-0001.jpeg'], - ['person1618_virus_2805.jpeg'], - ['NORMAL2-IM-0381-0001.jpeg'], - ['person159_bacteria_747.jpeg'], - ['NORMAL2-IM-0222-0001.jpeg'], - ] -interpretation = 'default' -enable_queue = True - - - - -iface = gr.Interface( - fn=predict, - title=title, - description=description, - article=article, - inputs=image, - outputs=label, - theme="default", - examples=examples, - interpretation=interpretation, - enable_queue=enable_queue -) - - - -iface.launch(inline=False) diff --git a/spaces/abdvl/datahub_qa_bot/docs/quick-ingestion-guides/snowflake/overview.md b/spaces/abdvl/datahub_qa_bot/docs/quick-ingestion-guides/snowflake/overview.md deleted file mode 100644 index 57de66c45bcf4765dd33ebae93299a2b9eaa1d73..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/quick-ingestion-guides/snowflake/overview.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Overview ---- -# Snowflake Ingestion Guide: Overview - -## What You Will Get Out of This Guide - -This guide will help you set up the Snowflake connector to begin ingesting metadata into DataHub. - -Upon completing this guide, you will have a recurring ingestion pipeline that will extract metadata from Snowflake and load it into DataHub. This will include to following Snowflake asset types: - -* Databases -* Schemas -* Tables -* External Tables -* Views -* Materialized Views - -The pipeline will also extract: - -* **Usage statistics** to help you understand recent query activity (available if using Snowflake Enterprise edition or above) -* **Table- and Column-level lineage** to automatically define interdependencies between datasets and columns (available if using Snowflake Enterprise edition or above) -* **Table-level profile statistics** to help you understand the shape of the data - -:::caution -You will NOT have extracted Stages, Snowpipes, Streams, Tasks, Procedures from Snowflake, as the connector does not support ingesting these assets yet. -::: - -### Caveats - -By default, DataHub only profiles datasets that have changed in the past 1 day. This can be changed in the YAML editor by setting the value of `profile_if_updated_since_days` to something greater than 1. - -Additionally, DataHub only extracts usage and lineage information based on operations performed in the last 1 day. This can be changed by setting a custom value for `start_time` and `end_time` in the YAML editor. - -*To learn more about setting these advanced values, check out the [Snowflake Ingestion Source](https://datahubproject.io/docs/generated/ingestion/sources/snowflake/#module-snowflake).* - -## Next Steps - -If that all sounds like what you're looking for, navigate to the [next page](setup.md), where we'll talk about prerequisites. - -## Advanced Guides and Reference - -If you want to ingest metadata from Snowflake using the DataHub CLI, check out the following resources: - -* Learn about CLI Ingestion in the [Introduction to Metadata Ingestion](../../../metadata-ingestion/README.md) -* [Snowflake Ingestion Source](https://datahubproject.io/docs/generated/ingestion/sources/snowflake/#module-snowflake) - -*Need more help? 
Join the conversation in [Slack](http://slack.datahubproject.io)!* \ No newline at end of file diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/image/photometric.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/image/photometric.py deleted file mode 100644 index 5085d012019c0cbf56f66f421a378278c1a058ae..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/image/photometric.py +++ /dev/null @@ -1,428 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import cv2 -import numpy as np - -from ..utils import is_tuple_of -from .colorspace import bgr2gray, gray2bgr - - -def imnormalize(img, mean, std, to_rgb=True): - """Normalize an image with mean and std. - - Args: - img (ndarray): Image to be normalized. - mean (ndarray): The mean to be used for normalize. - std (ndarray): The std to be used for normalize. - to_rgb (bool): Whether to convert to rgb. - - Returns: - ndarray: The normalized image. - """ - img = img.copy().astype(np.float32) - return imnormalize_(img, mean, std, to_rgb) - - -def imnormalize_(img, mean, std, to_rgb=True): - """Inplace normalize an image with mean and std. - - Args: - img (ndarray): Image to be normalized. - mean (ndarray): The mean to be used for normalize. - std (ndarray): The std to be used for normalize. - to_rgb (bool): Whether to convert to rgb. - - Returns: - ndarray: The normalized image. - """ - # cv2 inplace normalization does not accept uint8 - assert img.dtype != np.uint8 - mean = np.float64(mean.reshape(1, -1)) - stdinv = 1 / np.float64(std.reshape(1, -1)) - if to_rgb: - cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace - cv2.subtract(img, mean, img) # inplace - cv2.multiply(img, stdinv, img) # inplace - return img - - -def imdenormalize(img, mean, std, to_bgr=True): - assert img.dtype != np.uint8 - mean = mean.reshape(1, -1).astype(np.float64) - std = std.reshape(1, -1).astype(np.float64) - img = cv2.multiply(img, std) # make a copy - cv2.add(img, mean, img) # inplace - if to_bgr: - cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img) # inplace - return img - - -def iminvert(img): - """Invert (negate) an image. - - Args: - img (ndarray): Image to be inverted. - - Returns: - ndarray: The inverted image. - """ - return np.full_like(img, 255) - img - - -def solarize(img, thr=128): - """Solarize an image (invert all pixel values above a threshold) - - Args: - img (ndarray): Image to be solarized. - thr (int): Threshold for solarizing (0 - 255). - - Returns: - ndarray: The solarized image. - """ - img = np.where(img < thr, img, 255 - img) - return img - - -def posterize(img, bits): - """Posterize an image (reduce the number of bits for each color channel) - - Args: - img (ndarray): Image to be posterized. - bits (int): Number of bits (1 to 8) to use for posterizing. - - Returns: - ndarray: The posterized image. - """ - shift = 8 - bits - img = np.left_shift(np.right_shift(img, shift), shift) - return img - - -def adjust_color(img, alpha=1, beta=None, gamma=0): - r"""It blends the source image and its gray image: - - .. math:: - output = img * alpha + gray\_img * beta + gamma - - Args: - img (ndarray): The input source image. - alpha (int | float): Weight for the source image. Default 1. - beta (int | float): Weight for the converted gray image. - If None, it's assigned the value (1 - `alpha`). - gamma (int | float): Scalar added to each sum. - Same as :func:`cv2.addWeighted`. Default 0. - - Returns: - ndarray: Colored image which has the same size and dtype as input. 
- """ - gray_img = bgr2gray(img) - gray_img = np.tile(gray_img[..., None], [1, 1, 3]) - if beta is None: - beta = 1 - alpha - colored_img = cv2.addWeighted(img, alpha, gray_img, beta, gamma) - if not colored_img.dtype == np.uint8: - # Note when the dtype of `img` is not the default `np.uint8` - # (e.g. np.float32), the value in `colored_img` got from cv2 - # is not guaranteed to be in range [0, 255], so here clip - # is needed. - colored_img = np.clip(colored_img, 0, 255) - return colored_img - - -def imequalize(img): - """Equalize the image histogram. - - This function applies a non-linear mapping to the input image, - in order to create a uniform distribution of grayscale values - in the output image. - - Args: - img (ndarray): Image to be equalized. - - Returns: - ndarray: The equalized image. - """ - - def _scale_channel(im, c): - """Scale the data in the corresponding channel.""" - im = im[:, :, c] - # Compute the histogram of the image channel. - histo = np.histogram(im, 256, (0, 255))[0] - # For computing the step, filter out the nonzeros. - nonzero_histo = histo[histo > 0] - step = (np.sum(nonzero_histo) - nonzero_histo[-1]) // 255 - if not step: - lut = np.array(range(256)) - else: - # Compute the cumulative sum, shifted by step // 2 - # and then normalized by step. - lut = (np.cumsum(histo) + (step // 2)) // step - # Shift lut, prepending with 0. - lut = np.concatenate([[0], lut[:-1]], 0) - # handle potential integer overflow - lut[lut > 255] = 255 - # If step is zero, return the original image. - # Otherwise, index from lut. - return np.where(np.equal(step, 0), im, lut[im]) - - # Scales each channel independently and then stacks - # the result. - s1 = _scale_channel(img, 0) - s2 = _scale_channel(img, 1) - s3 = _scale_channel(img, 2) - equalized_img = np.stack([s1, s2, s3], axis=-1) - return equalized_img.astype(img.dtype) - - -def adjust_brightness(img, factor=1.): - """Adjust image brightness. - - This function controls the brightness of an image. An - enhancement factor of 0.0 gives a black image. - A factor of 1.0 gives the original image. This function - blends the source image and the degenerated black image: - - .. math:: - output = img * factor + degenerated * (1 - factor) - - Args: - img (ndarray): Image to be brightened. - factor (float): A value controls the enhancement. - Factor 1.0 returns the original image, lower - factors mean less color (brightness, contrast, - etc), and higher values more. Default 1. - - Returns: - ndarray: The brightened image. - """ - degenerated = np.zeros_like(img) - # Note manually convert the dtype to np.float32, to - # achieve as close results as PIL.ImageEnhance.Brightness. - # Set beta=1-factor, and gamma=0 - brightened_img = cv2.addWeighted( - img.astype(np.float32), factor, degenerated.astype(np.float32), - 1 - factor, 0) - brightened_img = np.clip(brightened_img, 0, 255) - return brightened_img.astype(img.dtype) - - -def adjust_contrast(img, factor=1.): - """Adjust image contrast. - - This function controls the contrast of an image. An - enhancement factor of 0.0 gives a solid grey - image. A factor of 1.0 gives the original image. It - blends the source image and the degenerated mean image: - - .. math:: - output = img * factor + degenerated * (1 - factor) - - Args: - img (ndarray): Image to be contrasted. BGR order. - factor (float): Same as :func:`mmcv.adjust_brightness`. - - Returns: - ndarray: The contrasted image. 
- """ - gray_img = bgr2gray(img) - hist = np.histogram(gray_img, 256, (0, 255))[0] - mean = round(np.sum(gray_img) / np.sum(hist)) - degenerated = (np.ones_like(img[..., 0]) * mean).astype(img.dtype) - degenerated = gray2bgr(degenerated) - contrasted_img = cv2.addWeighted( - img.astype(np.float32), factor, degenerated.astype(np.float32), - 1 - factor, 0) - contrasted_img = np.clip(contrasted_img, 0, 255) - return contrasted_img.astype(img.dtype) - - -def auto_contrast(img, cutoff=0): - """Auto adjust image contrast. - - This function maximize (normalize) image contrast by first removing cutoff - percent of the lightest and darkest pixels from the histogram and remapping - the image so that the darkest pixel becomes black (0), and the lightest - becomes white (255). - - Args: - img (ndarray): Image to be contrasted. BGR order. - cutoff (int | float | tuple): The cutoff percent of the lightest and - darkest pixels to be removed. If given as tuple, it shall be - (low, high). Otherwise, the single value will be used for both. - Defaults to 0. - - Returns: - ndarray: The contrasted image. - """ - - def _auto_contrast_channel(im, c, cutoff): - im = im[:, :, c] - # Compute the histogram of the image channel. - histo = np.histogram(im, 256, (0, 255))[0] - # Remove cut-off percent pixels from histo - histo_sum = np.cumsum(histo) - cut_low = histo_sum[-1] * cutoff[0] // 100 - cut_high = histo_sum[-1] - histo_sum[-1] * cutoff[1] // 100 - histo_sum = np.clip(histo_sum, cut_low, cut_high) - cut_low - histo = np.concatenate([[histo_sum[0]], np.diff(histo_sum)], 0) - - # Compute mapping - low, high = np.nonzero(histo)[0][0], np.nonzero(histo)[0][-1] - # If all the values have been cut off, return the origin img - if low >= high: - return im - scale = 255.0 / (high - low) - offset = -low * scale - lut = np.array(range(256)) - lut = lut * scale + offset - lut = np.clip(lut, 0, 255) - return lut[im] - - if isinstance(cutoff, (int, float)): - cutoff = (cutoff, cutoff) - else: - assert isinstance(cutoff, tuple), 'cutoff must be of type int, ' \ - f'float or tuple, but got {type(cutoff)} instead.' - # Auto adjusts contrast for each channel independently and then stacks - # the result. - s1 = _auto_contrast_channel(img, 0, cutoff) - s2 = _auto_contrast_channel(img, 1, cutoff) - s3 = _auto_contrast_channel(img, 2, cutoff) - contrasted_img = np.stack([s1, s2, s3], axis=-1) - return contrasted_img.astype(img.dtype) - - -def adjust_sharpness(img, factor=1., kernel=None): - """Adjust image sharpness. - - This function controls the sharpness of an image. An - enhancement factor of 0.0 gives a blurred image. A - factor of 1.0 gives the original image. And a factor - of 2.0 gives a sharpened image. It blends the source - image and the degenerated mean image: - - .. math:: - output = img * factor + degenerated * (1 - factor) - - Args: - img (ndarray): Image to be sharpened. BGR order. - factor (float): Same as :func:`mmcv.adjust_brightness`. - kernel (np.ndarray, optional): Filter kernel to be applied on the img - to obtain the degenerated img. Defaults to None. - - Note: - No value sanity check is enforced on the kernel set by users. So with - an inappropriate kernel, the ``adjust_sharpness`` may fail to perform - the function its name indicates but end up performing whatever - transform determined by the kernel. - - Returns: - ndarray: The sharpened image. 
- """ - - if kernel is None: - # adopted from PIL.ImageFilter.SMOOTH - kernel = np.array([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]]) / 13 - assert isinstance(kernel, np.ndarray), \ - f'kernel must be of type np.ndarray, but got {type(kernel)} instead.' - assert kernel.ndim == 2, \ - f'kernel must have a dimension of 2, but got {kernel.ndim} instead.' - - degenerated = cv2.filter2D(img, -1, kernel) - sharpened_img = cv2.addWeighted( - img.astype(np.float32), factor, degenerated.astype(np.float32), - 1 - factor, 0) - sharpened_img = np.clip(sharpened_img, 0, 255) - return sharpened_img.astype(img.dtype) - - -def adjust_lighting(img, eigval, eigvec, alphastd=0.1, to_rgb=True): - """AlexNet-style PCA jitter. - - This data augmentation is proposed in `ImageNet Classification with Deep - Convolutional Neural Networks - `_. - - Args: - img (ndarray): Image to be adjusted lighting. BGR order. - eigval (ndarray): the eigenvalue of the convariance matrix of pixel - values, respectively. - eigvec (ndarray): the eigenvector of the convariance matrix of pixel - values, respectively. - alphastd (float): The standard deviation for distribution of alpha. - Defaults to 0.1 - to_rgb (bool): Whether to convert img to rgb. - - Returns: - ndarray: The adjusted image. - """ - assert isinstance(eigval, np.ndarray) and isinstance(eigvec, np.ndarray), \ - f'eigval and eigvec should both be of type np.ndarray, got ' \ - f'{type(eigval)} and {type(eigvec)} instead.' - - assert eigval.ndim == 1 and eigvec.ndim == 2 - assert eigvec.shape == (3, eigval.shape[0]) - n_eigval = eigval.shape[0] - assert isinstance(alphastd, float), 'alphastd should be of type float, ' \ - f'got {type(alphastd)} instead.' - - img = img.copy().astype(np.float32) - if to_rgb: - cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace - - alpha = np.random.normal(0, alphastd, n_eigval) - alter = eigvec \ - * np.broadcast_to(alpha.reshape(1, n_eigval), (3, n_eigval)) \ - * np.broadcast_to(eigval.reshape(1, n_eigval), (3, n_eigval)) - alter = np.broadcast_to(alter.sum(axis=1).reshape(1, 1, 3), img.shape) - img_adjusted = img + alter - return img_adjusted - - -def lut_transform(img, lut_table): - """Transform array by look-up table. - - The function lut_transform fills the output array with values from the - look-up table. Indices of the entries are taken from the input array. - - Args: - img (ndarray): Image to be transformed. - lut_table (ndarray): look-up table of 256 elements; in case of - multi-channel input array, the table should either have a single - channel (in this case the same table is used for all channels) or - the same number of channels as in the input array. - - Returns: - ndarray: The transformed image. - """ - assert isinstance(img, np.ndarray) - assert 0 <= np.min(img) and np.max(img) <= 255 - assert isinstance(lut_table, np.ndarray) - assert lut_table.shape == (256, ) - - return cv2.LUT(np.array(img, dtype=np.uint8), lut_table) - - -def clahe(img, clip_limit=40.0, tile_grid_size=(8, 8)): - """Use CLAHE method to process the image. - - See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J]. - Graphics Gems, 1994:474-485.` for more information. - - Args: - img (ndarray): Image to be processed. - clip_limit (float): Threshold for contrast limiting. Default: 40.0. - tile_grid_size (tuple[int]): Size of grid for histogram equalization. - Input image will be divided into equally sized rectangular tiles. - It defines the number of tiles in row and column. Default: (8, 8). - - Returns: - ndarray: The processed image. 
- """ - assert isinstance(img, np.ndarray) - assert img.ndim == 2 - assert isinstance(clip_limit, (float, int)) - assert is_tuple_of(tile_grid_size, int) - assert len(tile_grid_size) == 2 - - clahe = cv2.createCLAHE(clip_limit, tile_grid_size) - return clahe.apply(np.array(img, dtype=np.uint8)) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/default_runtime.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/default_runtime.py deleted file mode 100644 index b564cc4e7e7d9a67dacaaddecb100e4d8f5c005b..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/configs/_base_/default_runtime.py +++ /dev/null @@ -1,14 +0,0 @@ -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook', by_epoch=False), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] -cudnn_benchmark = True diff --git a/spaces/abidlabs/mteb-leaderboard/app.py b/spaces/abidlabs/mteb-leaderboard/app.py deleted file mode 100644 index 27a9ade11e829c6cabcd536c5e043a6d283c03b6..0000000000000000000000000000000000000000 --- a/spaces/abidlabs/mteb-leaderboard/app.py +++ /dev/null @@ -1,1926 +0,0 @@ -import json - -from datasets import load_dataset -import gradio as gr -from huggingface_hub import get_hf_file_metadata, HfApi, hf_hub_download, hf_hub_url -from huggingface_hub.repocard import metadata_load -import pandas as pd - -TASKS = [ - "BitextMining", - "Classification", - "Clustering", - "PairClassification", - "Reranking", - "Retrieval", - "STS", - "Summarization", -] - -TASK_LIST_BITEXT_MINING = ['BUCC (de-en)', 'BUCC (fr-en)', 'BUCC (ru-en)', 'BUCC (zh-en)', 'Tatoeba (afr-eng)', 'Tatoeba (amh-eng)', 'Tatoeba (ang-eng)', 'Tatoeba (ara-eng)', 'Tatoeba (arq-eng)', 'Tatoeba (arz-eng)', 'Tatoeba (ast-eng)', 'Tatoeba (awa-eng)', 'Tatoeba (aze-eng)', 'Tatoeba (bel-eng)', 'Tatoeba (ben-eng)', 'Tatoeba (ber-eng)', 'Tatoeba (bos-eng)', 'Tatoeba (bre-eng)', 'Tatoeba (bul-eng)', 'Tatoeba (cat-eng)', 'Tatoeba (cbk-eng)', 'Tatoeba (ceb-eng)', 'Tatoeba (ces-eng)', 'Tatoeba (cha-eng)', 'Tatoeba (cmn-eng)', 'Tatoeba (cor-eng)', 'Tatoeba (csb-eng)', 'Tatoeba (cym-eng)', 'Tatoeba (dan-eng)', 'Tatoeba (deu-eng)', 'Tatoeba (dsb-eng)', 'Tatoeba (dtp-eng)', 'Tatoeba (ell-eng)', 'Tatoeba (epo-eng)', 'Tatoeba (est-eng)', 'Tatoeba (eus-eng)', 'Tatoeba (fao-eng)', 'Tatoeba (fin-eng)', 'Tatoeba (fra-eng)', 'Tatoeba (fry-eng)', 'Tatoeba (gla-eng)', 'Tatoeba (gle-eng)', 'Tatoeba (glg-eng)', 'Tatoeba (gsw-eng)', 'Tatoeba (heb-eng)', 'Tatoeba (hin-eng)', 'Tatoeba (hrv-eng)', 'Tatoeba (hsb-eng)', 'Tatoeba (hun-eng)', 'Tatoeba (hye-eng)', 'Tatoeba (ido-eng)', 'Tatoeba (ile-eng)', 'Tatoeba (ina-eng)', 'Tatoeba (ind-eng)', 'Tatoeba (isl-eng)', 'Tatoeba (ita-eng)', 'Tatoeba (jav-eng)', 'Tatoeba (jpn-eng)', 'Tatoeba (kab-eng)', 'Tatoeba (kat-eng)', 'Tatoeba (kaz-eng)', 'Tatoeba (khm-eng)', 'Tatoeba (kor-eng)', 'Tatoeba (kur-eng)', 'Tatoeba (kzj-eng)', 'Tatoeba (lat-eng)', 'Tatoeba (lfn-eng)', 'Tatoeba (lit-eng)', 'Tatoeba (lvs-eng)', 'Tatoeba (mal-eng)', 'Tatoeba (mar-eng)', 'Tatoeba (max-eng)', 'Tatoeba (mhr-eng)', 'Tatoeba (mkd-eng)', 'Tatoeba (mon-eng)', 'Tatoeba (nds-eng)', 'Tatoeba (nld-eng)', 'Tatoeba (nno-eng)', 'Tatoeba (nob-eng)', 'Tatoeba (nov-eng)', 'Tatoeba (oci-eng)', 'Tatoeba (orv-eng)', 'Tatoeba (pam-eng)', 'Tatoeba (pes-eng)', 'Tatoeba (pms-eng)', 'Tatoeba (pol-eng)', 'Tatoeba (por-eng)', 
'Tatoeba (ron-eng)', 'Tatoeba (rus-eng)', 'Tatoeba (slk-eng)', 'Tatoeba (slv-eng)', 'Tatoeba (spa-eng)', 'Tatoeba (sqi-eng)', 'Tatoeba (srp-eng)', 'Tatoeba (swe-eng)', 'Tatoeba (swg-eng)', 'Tatoeba (swh-eng)', 'Tatoeba (tam-eng)', 'Tatoeba (tat-eng)', 'Tatoeba (tel-eng)', 'Tatoeba (tgl-eng)', 'Tatoeba (tha-eng)', 'Tatoeba (tuk-eng)', 'Tatoeba (tur-eng)', 'Tatoeba (tzl-eng)', 'Tatoeba (uig-eng)', 'Tatoeba (ukr-eng)', 'Tatoeba (urd-eng)', 'Tatoeba (uzb-eng)', 'Tatoeba (vie-eng)', 'Tatoeba (war-eng)', 'Tatoeba (wuu-eng)', 'Tatoeba (xho-eng)', 'Tatoeba (yid-eng)', 'Tatoeba (yue-eng)', 'Tatoeba (zsm-eng)'] -TASK_LIST_BITEXT_MINING_OTHER = ["BornholmBitextMining"] - -TASK_LIST_CLASSIFICATION = [ - "AmazonCounterfactualClassification (en)", - "AmazonPolarityClassification", - "AmazonReviewsClassification (en)", - "Banking77Classification", - "EmotionClassification", - "ImdbClassification", - "MassiveIntentClassification (en)", - "MassiveScenarioClassification (en)", - "MTOPDomainClassification (en)", - "MTOPIntentClassification (en)", - "ToxicConversationsClassification", - "TweetSentimentExtractionClassification", -] - -TASK_LIST_CLASSIFICATION_NORM = [x.replace(" (en)", "") for x in TASK_LIST_CLASSIFICATION] - -TASK_LIST_CLASSIFICATION_DA = [ - "AngryTweetsClassification", - "DanishPoliticalCommentsClassification", - "DKHateClassification", - "LccSentimentClassification", - "MassiveIntentClassification (da)", - "MassiveScenarioClassification (da)", - "NordicLangClassification", - "ScalaDaClassification", -] - -TASK_LIST_CLASSIFICATION_NB = [ - "NoRecClassification", - "NordicLangClassification", - "NorwegianParliament", - "MassiveIntentClassification (nb)", - "MassiveScenarioClassification (nb)", - "ScalaNbClassification", -] - -TASK_LIST_CLASSIFICATION_PL = [ - "AllegroReviews", - "CBD", - "MassiveIntentClassification (pl)", - "MassiveScenarioClassification (pl)", - "PAC", - "PolEmo2.0-IN", - "PolEmo2.0-OUT", -] - -TASK_LIST_CLASSIFICATION_SV = [ - "DalajClassification", - "MassiveIntentClassification (sv)", - "MassiveScenarioClassification (sv)", - "NordicLangClassification", - "ScalaSvClassification", - "SweRecClassification", -] - -TASK_LIST_CLASSIFICATION_ZH = [ - "AmazonReviewsClassification (zh)", - "IFlyTek", - "JDReview", - "MassiveIntentClassification (zh-CN)", - "MassiveScenarioClassification (zh-CN)", - "MultilingualSentiment", - "OnlineShopping", - "TNews", - "Waimai", -] - -TASK_LIST_CLASSIFICATION_OTHER = ['AmazonCounterfactualClassification (de)', 'AmazonCounterfactualClassification (ja)', 'AmazonReviewsClassification (de)', 'AmazonReviewsClassification (es)', 'AmazonReviewsClassification (fr)', 'AmazonReviewsClassification (ja)', 'AmazonReviewsClassification (zh)', 'MTOPDomainClassification (de)', 'MTOPDomainClassification (es)', 'MTOPDomainClassification (fr)', 'MTOPDomainClassification (hi)', 'MTOPDomainClassification (th)', 'MTOPIntentClassification (de)', 'MTOPIntentClassification (es)', 'MTOPIntentClassification (fr)', 'MTOPIntentClassification (hi)', 'MTOPIntentClassification (th)', 'MassiveIntentClassification (af)', 'MassiveIntentClassification (am)', 'MassiveIntentClassification (ar)', 'MassiveIntentClassification (az)', 'MassiveIntentClassification (bn)', 'MassiveIntentClassification (cy)', 'MassiveIntentClassification (de)', 'MassiveIntentClassification (el)', 'MassiveIntentClassification (es)', 'MassiveIntentClassification (fa)', 'MassiveIntentClassification (fi)', 'MassiveIntentClassification (fr)', 'MassiveIntentClassification (he)', 'MassiveIntentClassification 
(hi)', 'MassiveIntentClassification (hu)', 'MassiveIntentClassification (hy)', 'MassiveIntentClassification (id)', 'MassiveIntentClassification (is)', 'MassiveIntentClassification (it)', 'MassiveIntentClassification (ja)', 'MassiveIntentClassification (jv)', 'MassiveIntentClassification (ka)', 'MassiveIntentClassification (km)', 'MassiveIntentClassification (kn)', 'MassiveIntentClassification (ko)', 'MassiveIntentClassification (lv)', 'MassiveIntentClassification (ml)', 'MassiveIntentClassification (mn)', 'MassiveIntentClassification (ms)', 'MassiveIntentClassification (my)', 'MassiveIntentClassification (nl)', 'MassiveIntentClassification (pt)', 'MassiveIntentClassification (ro)', 'MassiveIntentClassification (ru)', 'MassiveIntentClassification (sl)', 'MassiveIntentClassification (sq)', 'MassiveIntentClassification (sw)', 'MassiveIntentClassification (ta)', 'MassiveIntentClassification (te)', 'MassiveIntentClassification (th)', 'MassiveIntentClassification (tl)', 'MassiveIntentClassification (tr)', 'MassiveIntentClassification (ur)', 'MassiveIntentClassification (vi)', 'MassiveIntentClassification (zh-TW)', 'MassiveScenarioClassification (af)', 'MassiveScenarioClassification (am)', 'MassiveScenarioClassification (ar)', 'MassiveScenarioClassification (az)', 'MassiveScenarioClassification (bn)', 'MassiveScenarioClassification (cy)', 'MassiveScenarioClassification (de)', 'MassiveScenarioClassification (el)', 'MassiveScenarioClassification (es)', 'MassiveScenarioClassification (fa)', 'MassiveScenarioClassification (fi)', 'MassiveScenarioClassification (fr)', 'MassiveScenarioClassification (he)', 'MassiveScenarioClassification (hi)', 'MassiveScenarioClassification (hu)', 'MassiveScenarioClassification (hy)', 'MassiveScenarioClassification (id)', 'MassiveScenarioClassification (is)', 'MassiveScenarioClassification (it)', 'MassiveScenarioClassification (ja)', 'MassiveScenarioClassification (jv)', 'MassiveScenarioClassification (ka)', 'MassiveScenarioClassification (km)', 'MassiveScenarioClassification (kn)', 'MassiveScenarioClassification (ko)', 'MassiveScenarioClassification (lv)', 'MassiveScenarioClassification (ml)', 'MassiveScenarioClassification (mn)', 'MassiveScenarioClassification (ms)', 'MassiveScenarioClassification (my)', 'MassiveScenarioClassification (nl)', 'MassiveScenarioClassification (pt)', 'MassiveScenarioClassification (ro)', 'MassiveScenarioClassification (ru)', 'MassiveScenarioClassification (sl)', 'MassiveScenarioClassification (sq)', 'MassiveScenarioClassification (sw)', 'MassiveScenarioClassification (ta)', 'MassiveScenarioClassification (te)', 'MassiveScenarioClassification (th)', 'MassiveScenarioClassification (tl)', 'MassiveScenarioClassification (tr)', 'MassiveScenarioClassification (ur)', 'MassiveScenarioClassification (vi)', 'MassiveScenarioClassification (zh-TW)'] - -TASK_LIST_CLUSTERING = [ - "ArxivClusteringP2P", - "ArxivClusteringS2S", - "BiorxivClusteringP2P", - "BiorxivClusteringS2S", - "MedrxivClusteringP2P", - "MedrxivClusteringS2S", - "RedditClustering", - "RedditClusteringP2P", - "StackExchangeClustering", - "StackExchangeClusteringP2P", - "TwentyNewsgroupsClustering", -] - - -TASK_LIST_CLUSTERING_DE = [ - "BlurbsClusteringP2P", - "BlurbsClusteringS2S", - "TenKGnadClusteringP2P", - "TenKGnadClusteringS2S", -] - -TASK_LIST_CLUSTERING_PL = [ - "8TagsClustering", -] - -TASK_LIST_CLUSTERING_ZH = [ - "CLSClusteringP2P", - "CLSClusteringS2S", - "ThuNewsClusteringP2P", - "ThuNewsClusteringS2S", -] - -TASK_LIST_PAIR_CLASSIFICATION = [ - "SprintDuplicateQuestions", 
- "TwitterSemEval2015", - "TwitterURLCorpus", -] - -TASK_LIST_PAIR_CLASSIFICATION_PL = [ - "CDSC-E", - "PPC", - "PSC", - "SICK-E-PL", -] - -TASK_LIST_PAIR_CLASSIFICATION_ZH = [ - "Cmnli", - "Ocnli", -] - -TASK_LIST_RERANKING = [ - "AskUbuntuDupQuestions", - "MindSmallReranking", - "SciDocsRR", - "StackOverflowDupQuestions", -] - -TASK_LIST_RERANKING_ZH = [ - "CMedQAv1", - "CMedQAv2", - "MMarcoReranking", - "T2Reranking", -] - -TASK_LIST_RETRIEVAL = [ - "ArguAna", - "ClimateFEVER", - "CQADupstackRetrieval", - "DBPedia", - "FEVER", - "FiQA2018", - "HotpotQA", - "MSMARCO", - "NFCorpus", - "NQ", - "QuoraRetrieval", - "SCIDOCS", - "SciFact", - "Touche2020", - "TRECCOVID", -] - -TASK_LIST_RETRIEVAL_PL = [ - "ArguAna-PL", - "DBPedia-PL", - "FiQA-PL", - "HotpotQA-PL", - "MSMARCO-PL", - "NFCorpus-PL", - "NQ-PL", - "Quora-PL", - "SCIDOCS-PL", - "SciFact-PL", - "TRECCOVID-PL", -] - -TASK_LIST_RETRIEVAL_ZH = [ - "CmedqaRetrieval", - "CovidRetrieval", - "DuRetrieval", - "EcomRetrieval", - "MedicalRetrieval", - "MMarcoRetrieval", - "T2Retrieval", - "VideoRetrieval", -] - -TASK_LIST_RETRIEVAL_NORM = TASK_LIST_RETRIEVAL + [ - "CQADupstackAndroidRetrieval", - "CQADupstackEnglishRetrieval", - "CQADupstackGamingRetrieval", - "CQADupstackGisRetrieval", - "CQADupstackMathematicaRetrieval", - "CQADupstackPhysicsRetrieval", - "CQADupstackProgrammersRetrieval", - "CQADupstackStatsRetrieval", - "CQADupstackTexRetrieval", - "CQADupstackUnixRetrieval", - "CQADupstackWebmastersRetrieval", - "CQADupstackWordpressRetrieval" -] - -TASK_LIST_STS = [ - "BIOSSES", - "SICK-R", - "STS12", - "STS13", - "STS14", - "STS15", - "STS16", - "STS17 (en-en)", - "STS22 (en)", - "STSBenchmark", -] - -TASK_LIST_STS_PL = [ - "CDSC-R", - "SICK-R-PL", - "STS22 (pl)", -] - -TASK_LIST_STS_ZH = [ - "AFQMC", - "ATEC", - "BQ", - "LCQMC", - "PAWSX", - "QBQTC", - "STS22 (zh)", - "STSB", -] - -TASK_LIST_STS_OTHER = ["STS17 (ar-ar)", "STS17 (en-ar)", "STS17 (en-de)", "STS17 (en-tr)", "STS17 (es-en)", "STS17 (es-es)", "STS17 (fr-en)", "STS17 (it-en)", "STS17 (ko-ko)", "STS17 (nl-en)", "STS22 (ar)", "STS22 (de)", "STS22 (de-en)", "STS22 (de-fr)", "STS22 (de-pl)", "STS22 (es)", "STS22 (es-en)", "STS22 (es-it)", "STS22 (fr)", "STS22 (fr-pl)", "STS22 (it)", "STS22 (pl)", "STS22 (pl-en)", "STS22 (ru)", "STS22 (tr)", "STS22 (zh-en)", "STSBenchmark",] -TASK_LIST_STS_NORM = [x.replace(" (en)", "").replace(" (en-en)", "") for x in TASK_LIST_STS] - -TASK_LIST_SUMMARIZATION = ["SummEval",] - -TASK_LIST_EN = TASK_LIST_CLASSIFICATION + TASK_LIST_CLUSTERING + TASK_LIST_PAIR_CLASSIFICATION + TASK_LIST_RERANKING + TASK_LIST_RETRIEVAL + TASK_LIST_STS + TASK_LIST_SUMMARIZATION -TASK_LIST_PL = TASK_LIST_CLASSIFICATION_PL + TASK_LIST_CLUSTERING_PL + TASK_LIST_PAIR_CLASSIFICATION_PL + TASK_LIST_RETRIEVAL_PL + TASK_LIST_STS_PL -TASK_LIST_ZH = TASK_LIST_CLASSIFICATION_ZH + TASK_LIST_CLUSTERING_ZH + TASK_LIST_PAIR_CLASSIFICATION_ZH + TASK_LIST_RERANKING_ZH + TASK_LIST_RETRIEVAL_ZH + TASK_LIST_STS_ZH - -TASK_TO_METRIC = { - "BitextMining": "f1", - "Clustering": "v_measure", - "Classification": "accuracy", - "PairClassification": "cos_sim_ap", - "Reranking": "map", - "Retrieval": "ndcg_at_10", - "STS": "cos_sim_spearman", - "Summarization": "cos_sim_spearman", -} - -def make_clickable_model(model_name, link=None): - if link is None: - link = "https://huggingface.co/" + model_name - # Remove user from model name - return ( - f'{model_name.split("/")[-1]}' - ) - -# Models without metadata, thus we cannot fetch their results naturally -EXTERNAL_MODELS = [ - "all-MiniLM-L12-v2", 
- "all-MiniLM-L6-v2", - "all-mpnet-base-v2", - "allenai-specter", - "bert-base-swedish-cased", - "bert-base-uncased", - "bge-base-zh-v1.5", - "bge-large-zh-v1.5", - "bge-large-zh-noinstruct", - "bge-small-zh-v1.5", - "contriever-base-msmarco", - "cross-en-de-roberta-sentence-transformer", - "dfm-encoder-large-v1", - "dfm-sentence-encoder-large-1", - "distiluse-base-multilingual-cased-v2", - "DanskBERT", - "e5-base", - "e5-large", - "e5-small", - "electra-small-nordic", - "electra-small-swedish-cased-discriminator", - "gbert-base", - "gbert-large", - "gelectra-base", - "gelectra-large", - "gottbert-base", - "glove.6B.300d", - "gtr-t5-base", - "gtr-t5-large", - "gtr-t5-xl", - "gtr-t5-xxl", - "herbert-base-retrieval-v2", - "komninos", - "luotuo-bert-medium", - "LASER2", - "LaBSE", - "m3e-base", - "m3e-large", - "msmarco-bert-co-condensor", - "multilingual-e5-base", - "multilingual-e5-large", - "multilingual-e5-small", - "nb-bert-base", - "nb-bert-large", - "norbert3-base", - "norbert3-large", - "paraphrase-multilingual-MiniLM-L12-v2", - "paraphrase-multilingual-mpnet-base-v2", - "sentence-bert-swedish-cased", - "sentence-t5-base", - "sentence-t5-large", - "sentence-t5-xl", - "sentence-t5-xxl", - "sup-simcse-bert-base-uncased", - "st-polish-paraphrase-from-distilroberta", - "st-polish-paraphrase-from-mpnet", - "text2vec-base-chinese", - "text2vec-large-chinese", - "text-embedding-ada-002", - "text-similarity-ada-001", - "text-similarity-babbage-001", - "text-similarity-curie-001", - "text-similarity-davinci-001", - "text-search-ada-doc-001", - "text-search-ada-001", - "text-search-babbage-001", - "text-search-curie-001", - "text-search-davinci-001", - "unsup-simcse-bert-base-uncased", - "use-cmlm-multilingual", - "xlm-roberta-base", - "xlm-roberta-large", -] - -EXTERNAL_MODEL_TO_LINK = { - "allenai-specter": "https://huggingface.co/sentence-transformers/allenai-specter", - "allenai-specter": "https://huggingface.co/sentence-transformers/allenai-specter", - "all-MiniLM-L12-v2": "https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2", - "all-MiniLM-L6-v2": "https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2", - "all-mpnet-base-v2": "https://huggingface.co/sentence-transformers/all-mpnet-base-v2", - "bert-base-swedish-cased": "https://huggingface.co/KB/bert-base-swedish-cased", - "bert-base-uncased": "https://huggingface.co/bert-base-uncased", - "bge-base-zh-v1.5": "https://huggingface.co/BAAI/bge-base-zh-v1.5", - "bge-large-zh-v1.5": "https://huggingface.co/BAAI/bge-large-zh-v1.5", - "bge-large-zh-noinstruct": "https://huggingface.co/BAAI/bge-large-zh-noinstruct", - "bge-small-zh-v1.5": "https://huggingface.co/BAAI/bge-small-zh-v1.5", - "contriever-base-msmarco": "https://huggingface.co/nthakur/contriever-base-msmarco", - "cross-en-de-roberta-sentence-transformer": "https://huggingface.co/T-Systems-onsite/cross-en-de-roberta-sentence-transformer", - "DanskBERT": "https://huggingface.co/vesteinn/DanskBERT", - "distiluse-base-multilingual-cased-v2": "https://huggingface.co/sentence-transformers/distiluse-base-multilingual-cased-v2", - "dfm-encoder-large-v1": "https://huggingface.co/chcaa/dfm-encoder-large-v1", - "dfm-sentence-encoder-large-1": "https://huggingface.co/chcaa/dfm-encoder-large-v1", - "e5-base": "https://huggingface.co/intfloat/e5-base", - "e5-large": "https://huggingface.co/intfloat/e5-large", - "e5-small": "https://huggingface.co/intfloat/e5-small", - "electra-small-nordic": "https://huggingface.co/jonfd/electra-small-nordic", - 
"electra-small-swedish-cased-discriminator": "https://huggingface.co/KBLab/electra-small-swedish-cased-discriminator", - "gbert-base": "https://huggingface.co/deepset/gbert-base", - "gbert-large": "https://huggingface.co/deepset/gbert-large", - "gelectra-base": "https://huggingface.co/deepset/gelectra-base", - "gelectra-large": "https://huggingface.co/deepset/gelectra-large", - "glove.6B.300d": "https://huggingface.co/sentence-transformers/average_word_embeddings_glove.6B.300d", - "gottbert-base": "https://huggingface.co/uklfr/gottbert-base", - "gtr-t5-base": "https://huggingface.co/sentence-transformers/gtr-t5-base", - "gtr-t5-large": "https://huggingface.co/sentence-transformers/gtr-t5-large", - "gtr-t5-xl": "https://huggingface.co/sentence-transformers/gtr-t5-xl", - "gtr-t5-xxl": "https://huggingface.co/sentence-transformers/gtr-t5-xxl", - "herbert-base-retrieval-v2": "https://huggingface.co/ipipan/herbert-base-retrieval-v2", - "komninos": "https://huggingface.co/sentence-transformers/average_word_embeddings_komninos", - "luotuo-bert-medium": "https://huggingface.co/silk-road/luotuo-bert-medium", - "LASER2": "https://github.com/facebookresearch/LASER", - "LaBSE": "https://huggingface.co/sentence-transformers/LaBSE", - "m3e-base": "https://huggingface.co/moka-ai/m3e-base", - "m3e-large": "https://huggingface.co/moka-ai/m3e-large", - "msmarco-bert-co-condensor": "https://huggingface.co/sentence-transformers/msmarco-bert-co-condensor", - "multilingual-e5-base": "https://huggingface.co/intfloat/multilingual-e5-base", - "multilingual-e5-large": "https://huggingface.co/intfloat/multilingual-e5-large", - "multilingual-e5-small": "https://huggingface.co/intfloat/multilingual-e5-small", - "nb-bert-base": "https://huggingface.co/NbAiLab/nb-bert-base", - "nb-bert-large": "https://huggingface.co/NbAiLab/nb-bert-large", - "norbert3-base": "https://huggingface.co/ltg/norbert3-base", - "norbert3-large": "https://huggingface.co/ltg/norbert3-large", - "paraphrase-multilingual-mpnet-base-v2": "https://huggingface.co/sentence-transformers/paraphrase-multilingual-mpnet-base-v2", - "paraphrase-multilingual-MiniLM-L12-v2": "https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2", - "sentence-bert-swedish-cased": "https://huggingface.co/KBLab/sentence-bert-swedish-cased", - "sentence-t5-base": "https://huggingface.co/sentence-transformers/sentence-t5-base", - "sentence-t5-large": "https://huggingface.co/sentence-transformers/sentence-t5-large", - "sentence-t5-xl": "https://huggingface.co/sentence-transformers/sentence-t5-xl", - "sentence-t5-xxl": "https://huggingface.co/sentence-transformers/sentence-t5-xxl", - "sup-simcse-bert-base-uncased": "https://huggingface.co/princeton-nlp/sup-simcse-bert-base-uncased", - "st-polish-paraphrase-from-distilroberta": "https://huggingface.co/sdadas/st-polish-paraphrase-from-distilroberta", - "st-polish-paraphrase-from-mpnet": "https://huggingface.co/sdadas/st-polish-paraphrase-from-mpnet", - "text2vec-base-chinese": "https://huggingface.co/shibing624/text2vec-base-chinese", - "text2vec-large-chinese": "https://huggingface.co/GanymedeNil/text2vec-large-chinese", - "text-embedding-ada-002": "https://beta.openai.com/docs/guides/embeddings/types-of-embedding-models", - "text-similarity-ada-001": "https://beta.openai.com/docs/guides/embeddings/types-of-embedding-models", - "text-similarity-babbage-001": "https://beta.openai.com/docs/guides/embeddings/types-of-embedding-models", - "text-similarity-curie-001": 
"https://beta.openai.com/docs/guides/embeddings/types-of-embedding-models", - "text-similarity-davinci-001": "https://beta.openai.com/docs/guides/embeddings/types-of-embedding-models", - "text-search-ada-doc-001": "https://beta.openai.com/docs/guides/embeddings/types-of-embedding-models", - "text-search-ada-query-001": "https://beta.openai.com/docs/guides/embeddings/types-of-embedding-models", - "text-search-ada-001": "https://beta.openai.com/docs/guides/embeddings/types-of-embedding-models", - "text-search-curie-001": "https://beta.openai.com/docs/guides/embeddings/types-of-embedding-models", - "text-search-babbage-001": "https://beta.openai.com/docs/guides/embeddings/types-of-embedding-models", - "text-search-davinci-001": "https://beta.openai.com/docs/guides/embeddings/types-of-embedding-models", - "unsup-simcse-bert-base-uncased": "https://huggingface.co/princeton-nlp/unsup-simcse-bert-base-uncased", - "use-cmlm-multilingual": "https://huggingface.co/sentence-transformers/use-cmlm-multilingual", - "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base", - "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large", -} - -EXTERNAL_MODEL_TO_DIM = { - "all-MiniLM-L12-v2": 384, - "all-MiniLM-L6-v2": 384, - "all-mpnet-base-v2": 768, - "allenai-specter": 768, - "bert-base-swedish-cased": 768, - "bert-base-uncased": 768, - "bge-base-zh-v1.5": 768, - "bge-large-zh-v1.5": 1024, - "bge-large-zh-noinstruct": 1024, - "bge-small-zh-v1.5": 512, - "contriever-base-msmarco": 768, - "cross-en-de-roberta-sentence-transformer": 768, - "DanskBERT": 768, - "distiluse-base-multilingual-cased-v2": 512, - "dfm-encoder-large-v1": 1024, - "dfm-sentence-encoder-large-1": 1024, - "e5-base": 768, - "e5-small": 384, - "e5-large": 1024, - "electra-small-nordic": 256, - "electra-small-swedish-cased-discriminator": 256, - "luotuo-bert-medium": 768, - "LASER2": 1024, - "LaBSE": 768, - "gbert-base": 768, - "gbert-large": 1024, - "gelectra-base": 768, - "gelectra-large": 1024, - "glove.6B.300d": 300, - "gottbert-base": 768, - "gtr-t5-base": 768, - "gtr-t5-large": 768, - "gtr-t5-xl": 768, - "gtr-t5-xxl": 768, - "herbert-base-retrieval-v2": 768, - "komninos": 300, - "m3e-base": 768, - "m3e-large": 768, - "msmarco-bert-co-condensor": 768, - "multilingual-e5-base": 768, - "multilingual-e5-small": 384, - "multilingual-e5-large": 1024, - "nb-bert-base": 768, - "nb-bert-large": 1024, - "norbert3-base": 768, - "norbert3-large": 1024, - "paraphrase-multilingual-MiniLM-L12-v2": 384, - "paraphrase-multilingual-mpnet-base-v2": 768, - "sentence-bert-swedish-cased": 768, - "sentence-t5-base": 768, - "sentence-t5-large": 768, - "sentence-t5-xl": 768, - "sentence-t5-xxl": 768, - "sup-simcse-bert-base-uncased": 768, - "st-polish-paraphrase-from-distilroberta": 768, - "st-polish-paraphrase-from-mpnet": 768, - "text2vec-base-chinese": 768, - "text2vec-large-chinese": 1024, - "text-embedding-ada-002": 1536, - "text-similarity-ada-001": 1024, - "text-similarity-babbage-001": 2048, - "text-similarity-curie-001": 4096, - "text-similarity-davinci-001": 12288, - "text-search-ada-doc-001": 1024, - "text-search-ada-query-001": 1024, - "text-search-ada-001": 1024, - "text-search-babbage-001": 2048, - "text-search-curie-001": 4096, - "text-search-davinci-001": 12288, - "unsup-simcse-bert-base-uncased": 768, - "use-cmlm-multilingual": 768, - "xlm-roberta-base": 768, - "xlm-roberta-large": 1024, -} - -EXTERNAL_MODEL_TO_SEQLEN = { - "all-MiniLM-L12-v2": 512, - "all-MiniLM-L6-v2": 512, - "all-mpnet-base-v2": 514, - "allenai-specter": 512, - 
"bert-base-swedish-cased": 512, - "bert-base-uncased": 512, - "bge-base-zh-v1.5": 512, - "bge-large-zh-v1.5": 512, - "bge-large-zh-noinstruct": 512, - "bge-small-zh-v1.5": 512, - "contriever-base-msmarco": 512, - "cross-en-de-roberta-sentence-transformer": 514, - "DanskBERT": 514, - "dfm-encoder-large-v1": 512, - "dfm-sentence-encoder-large-1": 512, - "distiluse-base-multilingual-cased-v2": 512, - "e5-base": 512, - "e5-large": 512, - "e5-small": 512, - "electra-small-nordic": 512, - "electra-small-swedish-cased-discriminator": 512, - "gbert-base": 512, - "gbert-large": 512, - "gelectra-base": 512, - "gelectra-large": 512, - "gottbert-base": 512, - "glove.6B.300d": "N/A", - "gtr-t5-base": 512, - "gtr-t5-large": 512, - "gtr-t5-xl": 512, - "gtr-t5-xxl": 512, - "herbert-base-retrieval-v2": 514, - "komninos": "N/A", - "luotuo-bert-medium": 512, - "LASER2": "N/A", - "LaBSE": 512, - "m3e-base": 512, - "m3e-large": 512, - "msmarco-bert-co-condensor": 512, - "multilingual-e5-base": 514, - "multilingual-e5-large": 514, - "multilingual-e5-small": 512, - "nb-bert-base": 512, - "nb-bert-large": 512, - "norbert3-base": 512, - "norbert3-large": 512, - "paraphrase-multilingual-MiniLM-L12-v2": 512, - "paraphrase-multilingual-mpnet-base-v2": 514, - "sentence-bert-swedish-cased": 512, - "sentence-t5-base": 512, - "sentence-t5-large": 512, - "sentence-t5-xl": 512, - "sentence-t5-xxl": 512, - "sup-simcse-bert-base-uncased": 512, - "st-polish-paraphrase-from-distilroberta": 514, - "st-polish-paraphrase-from-mpnet": 514, - "text2vec-base-chinese": 512, - "text2vec-large-chinese": 512, - "text-embedding-ada-002": 8191, - "text-similarity-ada-001": 2046, - "text-similarity-babbage-001": 2046, - "text-similarity-curie-001": 2046, - "text-similarity-davinci-001": 2046, - "text-search-ada-doc-001": 2046, - "text-search-ada-query-001": 2046, - "text-search-ada-001": 2046, - "text-search-babbage-001": 2046, - "text-search-curie-001": 2046, - "text-search-davinci-001": 2046, - "use-cmlm-multilingual": 512, - "unsup-simcse-bert-base-uncased": 512, - "xlm-roberta-base": 514, - "xlm-roberta-large": 514, -} - -EXTERNAL_MODEL_TO_SIZE = { - "allenai-specter": 0.44, - "all-MiniLM-L12-v2": 0.13, - "all-MiniLM-L6-v2": 0.09, - "all-mpnet-base-v2": 0.44, - "bert-base-uncased": 0.44, - "bert-base-swedish-cased": 0.50, - "bge-base-zh-v1.5": 0.41, - "bge-large-zh-v1.5": 1.30, - "bge-large-zh-noinstruct": 1.30, - "bge-small-zh-v1.5": 0.10, - "cross-en-de-roberta-sentence-transformer": 1.11, - "contriever-base-msmarco": 0.44, - "DanskBERT": 0.50, - "distiluse-base-multilingual-cased-v2": 0.54, - "dfm-encoder-large-v1": 1.42, - "dfm-sentence-encoder-large-1": 1.63, - "e5-base": 0.44, - "e5-small": 0.13, - "e5-large": 1.34, - "electra-small-nordic": 0.09, - "electra-small-swedish-cased-discriminator": 0.06, - "gbert-base": 0.44, - "gbert-large": 1.35, - "gelectra-base": 0.44, - "gelectra-large": 1.34, - "glove.6B.300d": 0.48, - "gottbert-base": 0.51, - "gtr-t5-base": 0.22, - "gtr-t5-large": 0.67, - "gtr-t5-xl": 2.48, - "gtr-t5-xxl": 9.73, - "herbert-base-retrieval-v2": 0.50, - "komninos": 0.27, - "luotuo-bert-medium": 1.31, - "LASER2": 0.17, - "LaBSE": 1.88, - "m3e-base": 0.41, - "m3e-large": 0.41, - "msmarco-bert-co-condensor": 0.44, - "multilingual-e5-base": 1.11, - "multilingual-e5-small": 0.47, - "multilingual-e5-large": 2.24, - "nb-bert-base": 0.71, - "nb-bert-large": 1.42, - "norbert3-base": 0.52, - "norbert3-large": 1.47, - "paraphrase-multilingual-mpnet-base-v2": 1.11, - "paraphrase-multilingual-MiniLM-L12-v2": 0.47, - 
"sentence-bert-swedish-cased": 0.50, - "sentence-t5-base": 0.22, - "sentence-t5-large": 0.67, - "sentence-t5-xl": 2.48, - "sentence-t5-xxl": 9.73, - "sup-simcse-bert-base-uncased": 0.44, - "st-polish-paraphrase-from-distilroberta": 0.50, - "st-polish-paraphrase-from-mpnet": 0.50, - "text2vec-base-chinese": 0.41, - "text2vec-large-chinese": 1.30, - "unsup-simcse-bert-base-uncased": 0.44, - "use-cmlm-multilingual": 1.89, - "xlm-roberta-base": 1.12, - "xlm-roberta-large": 2.24, -} - -MODELS_TO_SKIP = { - "baseplate/instructor-large-1", # Duplicate - "radames/e5-large", # Duplicate - "gentlebowl/instructor-large-safetensors", # Duplicate - "Consensus/instructor-base", # Duplicate - "GovCompete/instructor-xl", # Duplicate - "GovCompete/e5-large-v2", # Duplicate - "t12e/instructor-base", # Duplicate - "michaelfeil/ct2fast-e5-large-v2", - "michaelfeil/ct2fast-e5-large", - "michaelfeil/ct2fast-e5-small-v2", - "newsrx/instructor-xl-newsrx", - "newsrx/instructor-large-newsrx", - "fresha/e5-large-v2-endpoint", - "ggrn/e5-small-v2", - "michaelfeil/ct2fast-e5-small", - "jncraton/e5-small-v2-ct2-int8", - "anttip/ct2fast-e5-small-v2-hfie", - "newsrx/instructor-large", - "newsrx/instructor-xl", - "dmlls/all-mpnet-base-v2", - "cgldo/semanticClone", - "Malmuk1/e5-large-v2_Sharded", - "jncraton/gte-small-ct2-int8", - "Einas/einas_ashkar", - "gruber/e5-small-v2-ggml", - "jncraton/bge-small-en-ct2-int8", - "vectoriseai/bge-small-en", - "recipe/embeddings", - "dhairya0907/thenlper-get-large", - "Narsil/bge-base-en", - "kozistr/fused-large-en", - "sionic-ai/sionic-ai-v2", # Wait for https://huggingface.co/sionic-ai/sionic-ai-v2/discussions/1 - "sionic-ai/sionic-ai-v1", # Wait for https://huggingface.co/sionic-ai/sionic-ai-v2/discussions/1 - "BAAI/bge-large-en", # Deprecated in favor of v1.5 - "BAAI/bge-base-en", # Deprecated in favor of v1.5 - "BAAI/bge-small-en", # Deprecated in favor of v1.5 - "d0rj/e5-large-en-ru", - "d0rj/e5-base-en-ru", - "d0rj/e5-small-en-ru", - "aident-ai/bge-base-en-onnx", - "barisaydin/bge-base-en", - "barisaydin/gte-large", - "barisaydin/gte-base", - "barisaydin/gte-small", - "barisaydin/bge-small-en", - "odunola/e5-base-v2", - "goldenrooster/multilingual-e5-large", - "davidpeer/gte-small", - "barisaydin/bge-large-en", - "jamesgpt1/english-large-v1", - "vectoriseai/bge-large-en-v1.5", - "vectoriseai/bge-base-en-v1.5", - "vectoriseai/instructor-large", - "vectoriseai/instructor-base", - "vectoriseai/gte-large", - "vectoriseai/gte-base", - "vectoriseai/e5-large-v2", - "vectoriseai/bge-small-en-v1.5", - "vectoriseai/e5-base-v2", - "vectoriseai/e5-large", - "vectoriseai/multilingual-e5-large", - "vectoriseai/gte-small", - "vectoriseai/ember-v1", - "vectoriseai/e5-base", - "vectoriseai/e5-small-v2", - "michaelfeil/ct2fast-bge-large-en-v1.5", - "michaelfeil/ct2fast-bge-large-en-v1.5", - "michaelfeil/ct2fast-bge-base-en-v1.5", - "michaelfeil/ct2fast-gte-large", - "michaelfeil/ct2fast-gte-base", - "michaelfeil/ct2fast-bge-small-en-v1.5", - "rizki/bgr-tf", - "ef-zulla/e5-multi-sml-torch", -} - -EXTERNAL_MODEL_RESULTS = {model: {k: {v: []} for k, v in TASK_TO_METRIC.items()} for model in EXTERNAL_MODELS} - -def add_lang(examples): - if not(examples["eval_language"]): - examples["mteb_dataset_name_with_lang"] = examples["mteb_dataset_name"] - else: - examples["mteb_dataset_name_with_lang"] = examples["mteb_dataset_name"] + f' ({examples["eval_language"]})' - return examples - -def add_task(examples): - # Could be added to the dataset loading script instead - if examples["mteb_dataset_name"] in 
TASK_LIST_CLASSIFICATION_NORM + TASK_LIST_CLASSIFICATION_DA + TASK_LIST_CLASSIFICATION_NB + TASK_LIST_CLASSIFICATION_PL + TASK_LIST_CLASSIFICATION_SV + TASK_LIST_CLASSIFICATION_ZH: - examples["mteb_task"] = "Classification" - elif examples["mteb_dataset_name"] in TASK_LIST_CLUSTERING + TASK_LIST_CLUSTERING_DE + TASK_LIST_CLUSTERING_PL + TASK_LIST_CLUSTERING_ZH: - examples["mteb_task"] = "Clustering" - elif examples["mteb_dataset_name"] in TASK_LIST_PAIR_CLASSIFICATION + TASK_LIST_PAIR_CLASSIFICATION_PL + TASK_LIST_PAIR_CLASSIFICATION_ZH: - examples["mteb_task"] = "PairClassification" - elif examples["mteb_dataset_name"] in TASK_LIST_RERANKING + TASK_LIST_RERANKING_ZH: - examples["mteb_task"] = "Reranking" - elif examples["mteb_dataset_name"] in TASK_LIST_RETRIEVAL_NORM + TASK_LIST_RETRIEVAL_PL + TASK_LIST_RETRIEVAL_ZH: - examples["mteb_task"] = "Retrieval" - elif examples["mteb_dataset_name"] in TASK_LIST_STS_NORM + TASK_LIST_STS_PL + TASK_LIST_STS_ZH: - examples["mteb_task"] = "STS" - elif examples["mteb_dataset_name"] in TASK_LIST_SUMMARIZATION: - examples["mteb_task"] = "Summarization" - elif examples["mteb_dataset_name"] in [x.split(" ")[0] for x in TASK_LIST_BITEXT_MINING + TASK_LIST_BITEXT_MINING_OTHER]: - examples["mteb_task"] = "BitextMining" - else: - print("WARNING: Task not found for dataset", examples["mteb_dataset_name"]) - examples["mteb_task"] = "Unknown" - return examples - -for model in EXTERNAL_MODELS: - ds = load_dataset("mteb/results", model) - # For local debugging: - #, download_mode='force_redownload', verification_mode="no_checks") - ds = ds.map(add_lang) - ds = ds.map(add_task) - base_dict = {"Model": make_clickable_model(model, link=EXTERNAL_MODEL_TO_LINK.get(model, "https://huggingface.co/spaces/mteb/leaderboard"))} - # For now only one metric per task - Could add more metrics lateron - for task, metric in TASK_TO_METRIC.items(): - ds_dict = ds.filter(lambda x: (x["mteb_task"] == task) and (x["metric"] == metric))["test"].to_dict() - ds_dict = {k: round(v, 2) for k, v in zip(ds_dict["mteb_dataset_name_with_lang"], ds_dict["score"])} - EXTERNAL_MODEL_RESULTS[model][task][metric].append({**base_dict, **ds_dict}) - -def get_dim_seq_size(model): - filenames = [sib.rfilename for sib in model.siblings] - dim, seq, size = "", "", "" - if "1_Pooling/config.json" in filenames: - st_config_path = hf_hub_download(model.modelId, filename="1_Pooling/config.json") - dim = json.load(open(st_config_path)).get("word_embedding_dimension", "") - elif "2_Pooling/config.json" in filenames: - st_config_path = hf_hub_download(model.modelId, filename="2_Pooling/config.json") - dim = json.load(open(st_config_path)).get("word_embedding_dimension", "") - if "config.json" in filenames: - config_path = hf_hub_download(model.modelId, filename="config.json") - config = json.load(open(config_path)) - if not dim: - dim = config.get("hidden_dim", config.get("hidden_size", config.get("d_model", ""))) - seq = config.get("n_positions", config.get("max_position_embeddings", config.get("n_ctx", config.get("seq_length", "")))) - # Get model file size without downloading - if "pytorch_model.bin" in filenames: - url = hf_hub_url(model.modelId, filename="pytorch_model.bin") - meta = get_hf_file_metadata(url) - size = round(meta.size / 1e9, 2) - elif "pytorch_model.bin.index.json" in filenames: - index_path = hf_hub_download(model.modelId, filename="pytorch_model.bin.index.json") - """ - { - "metadata": { - "total_size": 28272820224 - },.... 
- """ - size = json.load(open(index_path)) - if ("metadata" in size) and ("total_size" in size["metadata"]): - size = round(size["metadata"]["total_size"] / 1e9, 2) - return dim, seq, size - -def make_datasets_clickable(df): - """Does not work""" - if "BornholmBitextMining" in df.columns: - link = "https://huggingface.co/datasets/strombergnlp/bornholmsk_parallel" - df = df.rename( - columns={f'BornholmBitextMining': 'BornholmBitextMining',}) - return df - -def add_rank(df): - cols_to_rank = [col for col in df.columns if col not in ["Model", "Model Size (GB)", "Embedding Dimensions", "Sequence Length"]] - if len(cols_to_rank) == 1: - df.sort_values(cols_to_rank[0], ascending=False, inplace=True) - else: - df.insert(1, "Average", df[cols_to_rank].mean(axis=1, skipna=False)) - df.sort_values("Average", ascending=False, inplace=True) - df.insert(0, "Rank", list(range(1, len(df) + 1))) - df = df.round(2) - # Fill NaN after averaging - df.fillna("", inplace=True) - return df - -def get_mteb_data(tasks=["Clustering"], langs=[], datasets=[], fillna=True, add_emb_dim=False, task_to_metric=TASK_TO_METRIC, rank=True): - api = HfApi() - models = api.list_models(filter="mteb") - # Initialize list to models that we cannot fetch metadata from - df_list = [] - for model in EXTERNAL_MODEL_RESULTS: - results_list = [res for task in tasks for res in EXTERNAL_MODEL_RESULTS[model][task][task_to_metric[task]]] - if len(datasets) > 0: - res = {k: v for d in results_list for k, v in d.items() if (k == "Model") or any([x in k for x in datasets])} - elif langs: - # Would be cleaner to rely on an extra language column instead - langs_format = [f"({lang})" for lang in langs] - res = {k: v for d in results_list for k, v in d.items() if any([k.split(" ")[-1] in (k, x) for x in langs_format])} - else: - res = {k: v for d in results_list for k, v in d.items()} - # Model & at least one result - if len(res) > 1: - if add_emb_dim: - res["Model Size (GB)"] = EXTERNAL_MODEL_TO_SIZE.get(model, "") - res["Embedding Dimensions"] = EXTERNAL_MODEL_TO_DIM.get(model, "") - res["Sequence Length"] = EXTERNAL_MODEL_TO_SEQLEN.get(model, "") - df_list.append(res) - - for model in models: - if model.modelId in MODELS_TO_SKIP: continue - print("MODEL", model) - readme_path = hf_hub_download(model.modelId, filename="README.md") - meta = metadata_load(readme_path) - # meta['model-index'][0]["results"] is list of elements like: - # { - # "task": {"type": "Classification"}, - # "dataset": { - # "type": "mteb/amazon_massive_intent", - # "name": "MTEB MassiveIntentClassification (nb)", - # "config": "nb", - # "split": "test", - # }, - # "metrics": [ - # {"type": "accuracy", "value": 39.81506388702084}, - # {"type": "f1", "value": 38.809586587791664}, - # ], - # }, - # Use "get" instead of dict indexing to skip incompat metadata instead of erroring out - if len(datasets) > 0: - task_results = [sub_res for sub_res in meta["model-index"][0]["results"] if (sub_res.get("task", {}).get("type", "") in tasks) and any([x in sub_res.get("dataset", {}).get("name", "") for x in datasets])] - elif langs: - task_results = [sub_res for sub_res in meta["model-index"][0]["results"] if (sub_res.get("task", {}).get("type", "") in tasks) and (sub_res.get("dataset", {}).get("config", "default") in ("default", *langs))] - else: - task_results = [sub_res for sub_res in meta["model-index"][0]["results"] if (sub_res.get("task", {}).get("type", "") in tasks)] - out = [{res["dataset"]["name"].replace("MTEB ", ""): [round(score["value"], 2) for score in res["metrics"] if 
score["type"] == task_to_metric.get(res["task"]["type"])][0]} for res in task_results] - out = {k: v for d in out for k, v in d.items()} - out["Model"] = make_clickable_model(model.modelId) - # Model & at least one result - if len(out) > 1: - if add_emb_dim: - out["Embedding Dimensions"], out["Sequence Length"], out["Model Size (GB)"] = get_dim_seq_size(model) - df_list.append(out) - df = pd.DataFrame(df_list) - # If there are any models that are the same, merge them - # E.g. if out["Model"] has the same value in two places, merge & take whichever one is not NaN else just take the first one - df = df.groupby("Model", as_index=False).first() - # Put 'Model' column first - cols = sorted(list(df.columns)) - cols.insert(0, cols.pop(cols.index("Model"))) - df = df[cols] - if rank: - df = add_rank(df) - if fillna: - df.fillna("", inplace=True) - return df - -def get_mteb_average(): - global DATA_OVERALL, DATA_CLASSIFICATION_EN, DATA_CLUSTERING, DATA_PAIR_CLASSIFICATION, DATA_RERANKING, DATA_RETRIEVAL, DATA_STS_EN, DATA_SUMMARIZATION - DATA_OVERALL = get_mteb_data( - tasks=[ - "Classification", - "Clustering", - "PairClassification", - "Reranking", - "Retrieval", - "STS", - "Summarization", - ], - datasets=TASK_LIST_CLASSIFICATION + TASK_LIST_CLUSTERING + TASK_LIST_PAIR_CLASSIFICATION + TASK_LIST_RERANKING + TASK_LIST_RETRIEVAL + TASK_LIST_STS + TASK_LIST_SUMMARIZATION, - fillna=False, - add_emb_dim=True, - rank=False, - ) - # Debugging: - # DATA_OVERALL.to_csv("overall.csv") - - DATA_OVERALL.insert(1, f"Average ({len(TASK_LIST_EN)} datasets)", DATA_OVERALL[TASK_LIST_EN].mean(axis=1, skipna=False)) - DATA_OVERALL.insert(2, f"Classification Average ({len(TASK_LIST_CLASSIFICATION)} datasets)", DATA_OVERALL[TASK_LIST_CLASSIFICATION].mean(axis=1, skipna=False)) - DATA_OVERALL.insert(3, f"Clustering Average ({len(TASK_LIST_CLUSTERING)} datasets)", DATA_OVERALL[TASK_LIST_CLUSTERING].mean(axis=1, skipna=False)) - DATA_OVERALL.insert(4, f"Pair Classification Average ({len(TASK_LIST_PAIR_CLASSIFICATION)} datasets)", DATA_OVERALL[TASK_LIST_PAIR_CLASSIFICATION].mean(axis=1, skipna=False)) - DATA_OVERALL.insert(5, f"Reranking Average ({len(TASK_LIST_RERANKING)} datasets)", DATA_OVERALL[TASK_LIST_RERANKING].mean(axis=1, skipna=False)) - DATA_OVERALL.insert(6, f"Retrieval Average ({len(TASK_LIST_RETRIEVAL)} datasets)", DATA_OVERALL[TASK_LIST_RETRIEVAL].mean(axis=1, skipna=False)) - DATA_OVERALL.insert(7, f"STS Average ({len(TASK_LIST_STS)} datasets)", DATA_OVERALL[TASK_LIST_STS].mean(axis=1, skipna=False)) - DATA_OVERALL.insert(8, f"Summarization Average ({len(TASK_LIST_SUMMARIZATION)} dataset)", DATA_OVERALL[TASK_LIST_SUMMARIZATION].mean(axis=1, skipna=False)) - DATA_OVERALL.sort_values(f"Average ({len(TASK_LIST_EN)} datasets)", ascending=False, inplace=True) - # Start ranking from 1 - DATA_OVERALL.insert(0, "Rank", list(range(1, len(DATA_OVERALL) + 1))) - - DATA_OVERALL = DATA_OVERALL.round(2) - - DATA_CLASSIFICATION_EN = add_rank(DATA_OVERALL[["Model"] + TASK_LIST_CLASSIFICATION]) - # Only keep rows with at least one score in addition to the "Model" & rank column - DATA_CLASSIFICATION_EN = DATA_CLASSIFICATION_EN[DATA_CLASSIFICATION_EN.iloc[:, 2:].ne("").any(axis=1)] - - DATA_CLUSTERING = add_rank(DATA_OVERALL[["Model"] + TASK_LIST_CLUSTERING]) - DATA_CLUSTERING = DATA_CLUSTERING[DATA_CLUSTERING.iloc[:, 2:].ne("").any(axis=1)] - - DATA_PAIR_CLASSIFICATION = add_rank(DATA_OVERALL[["Model"] + TASK_LIST_PAIR_CLASSIFICATION]) - DATA_PAIR_CLASSIFICATION = DATA_PAIR_CLASSIFICATION[DATA_PAIR_CLASSIFICATION.iloc[:, 
2:].ne("").any(axis=1)] - - DATA_RERANKING = add_rank(DATA_OVERALL[["Model"] + TASK_LIST_RERANKING]) - DATA_RERANKING = DATA_RERANKING[DATA_RERANKING.iloc[:, 2:].ne("").any(axis=1)] - - DATA_RETRIEVAL = add_rank(DATA_OVERALL[["Model"] + TASK_LIST_RETRIEVAL]) - DATA_RETRIEVAL = DATA_RETRIEVAL[DATA_RETRIEVAL.iloc[:, 2:].ne("").any(axis=1)] - - DATA_STS_EN = add_rank(DATA_OVERALL[["Model"] + TASK_LIST_STS]) - DATA_STS_EN = DATA_STS_EN[DATA_STS_EN.iloc[:, 2:].ne("").any(axis=1)] - - DATA_SUMMARIZATION = add_rank(DATA_OVERALL[["Model"] + TASK_LIST_SUMMARIZATION]) - DATA_SUMMARIZATION = DATA_SUMMARIZATION[DATA_SUMMARIZATION.iloc[:, 1:].ne("").any(axis=1)] - - # Fill NaN after averaging - DATA_OVERALL.fillna("", inplace=True) - - DATA_OVERALL = DATA_OVERALL[["Rank", "Model", "Model Size (GB)", "Embedding Dimensions", "Sequence Length", f"Average ({len(TASK_LIST_EN)} datasets)", f"Classification Average ({len(TASK_LIST_CLASSIFICATION)} datasets)", f"Clustering Average ({len(TASK_LIST_CLUSTERING)} datasets)", f"Pair Classification Average ({len(TASK_LIST_PAIR_CLASSIFICATION)} datasets)", f"Reranking Average ({len(TASK_LIST_RERANKING)} datasets)", f"Retrieval Average ({len(TASK_LIST_RETRIEVAL)} datasets)", f"STS Average ({len(TASK_LIST_STS)} datasets)", f"Summarization Average ({len(TASK_LIST_SUMMARIZATION)} dataset)"]] - DATA_OVERALL = DATA_OVERALL[DATA_OVERALL.iloc[:, 5:].ne("").any(axis=1)] - - return DATA_OVERALL - -def get_mteb_average_zh(): - global DATA_OVERALL_ZH, DATA_CLASSIFICATION_ZH, DATA_CLUSTERING_ZH, DATA_PAIR_CLASSIFICATION_ZH, DATA_RERANKING_ZH, DATA_RETRIEVAL_ZH, DATA_STS_ZH - DATA_OVERALL_ZH = get_mteb_data( - tasks=[ - "Classification", - "Clustering", - "PairClassification", - "Reranking", - "Retrieval", - "STS", - ], - datasets=TASK_LIST_CLASSIFICATION_ZH + TASK_LIST_CLUSTERING_ZH + TASK_LIST_PAIR_CLASSIFICATION_ZH + TASK_LIST_RERANKING_ZH + TASK_LIST_RETRIEVAL_ZH + TASK_LIST_STS_ZH, - fillna=False, - add_emb_dim=True, - rank=False, - ) - # Debugging: - # DATA_OVERALL_ZH.to_csv("overall.csv") - - DATA_OVERALL_ZH.insert(1, f"Average ({len(TASK_LIST_ZH)} datasets)", DATA_OVERALL_ZH[TASK_LIST_ZH].mean(axis=1, skipna=False)) - DATA_OVERALL_ZH.insert(2, f"Classification Average ({len(TASK_LIST_CLASSIFICATION_ZH)} datasets)", DATA_OVERALL_ZH[TASK_LIST_CLASSIFICATION_ZH].mean(axis=1, skipna=False)) - DATA_OVERALL_ZH.insert(3, f"Clustering Average ({len(TASK_LIST_CLUSTERING_ZH)} datasets)", DATA_OVERALL_ZH[TASK_LIST_CLUSTERING_ZH].mean(axis=1, skipna=False)) - DATA_OVERALL_ZH.insert(4, f"Pair Classification Average ({len(TASK_LIST_PAIR_CLASSIFICATION_ZH)} datasets)", DATA_OVERALL_ZH[TASK_LIST_PAIR_CLASSIFICATION_ZH].mean(axis=1, skipna=False)) - DATA_OVERALL_ZH.insert(5, f"Reranking Average ({len(TASK_LIST_RERANKING_ZH)} datasets)", DATA_OVERALL_ZH[TASK_LIST_RERANKING_ZH].mean(axis=1, skipna=False)) - DATA_OVERALL_ZH.insert(6, f"Retrieval Average ({len(TASK_LIST_RETRIEVAL_ZH)} datasets)", DATA_OVERALL_ZH[TASK_LIST_RETRIEVAL_ZH].mean(axis=1, skipna=False)) - DATA_OVERALL_ZH.insert(7, f"STS Average ({len(TASK_LIST_STS_ZH)} datasets)", DATA_OVERALL_ZH[TASK_LIST_STS_ZH].mean(axis=1, skipna=False)) - DATA_OVERALL_ZH.sort_values(f"Average ({len(TASK_LIST_ZH)} datasets)", ascending=False, inplace=True) - # Start ranking from 1 - DATA_OVERALL_ZH.insert(0, "Rank", list(range(1, len(DATA_OVERALL_ZH) + 1))) - - DATA_OVERALL_ZH = DATA_OVERALL_ZH.round(2) - - DATA_CLASSIFICATION_ZH = add_rank(DATA_OVERALL_ZH[["Model"] + TASK_LIST_CLASSIFICATION_ZH]) - # Only keep rows with at least one score in 
addition to the "Model" & rank column - DATA_CLASSIFICATION_ZH = DATA_CLASSIFICATION_ZH[DATA_CLASSIFICATION_ZH.iloc[:, 2:].ne("").any(axis=1)] - - DATA_CLUSTERING_ZH = add_rank(DATA_OVERALL_ZH[["Model"] + TASK_LIST_CLUSTERING_ZH]) - DATA_CLUSTERING_ZH = DATA_CLUSTERING_ZH[DATA_CLUSTERING_ZH.iloc[:, 2:].ne("").any(axis=1)] - - DATA_PAIR_CLASSIFICATION_ZH = add_rank(DATA_OVERALL_ZH[["Model"] + TASK_LIST_PAIR_CLASSIFICATION_ZH]) - DATA_PAIR_CLASSIFICATION_ZH = DATA_PAIR_CLASSIFICATION_ZH[DATA_PAIR_CLASSIFICATION_ZH.iloc[:, 2:].ne("").any(axis=1)] - - DATA_RERANKING_ZH = add_rank(DATA_OVERALL_ZH[["Model"] + TASK_LIST_RERANKING_ZH]) - DATA_RERANKING_ZH = DATA_RERANKING_ZH[DATA_RERANKING_ZH.iloc[:, 2:].ne("").any(axis=1)] - - DATA_RETRIEVAL_ZH = add_rank(DATA_OVERALL_ZH[["Model"] + TASK_LIST_RETRIEVAL_ZH]) - DATA_RETRIEVAL_ZH = DATA_RETRIEVAL_ZH[DATA_RETRIEVAL_ZH.iloc[:, 2:].ne("").any(axis=1)] - - DATA_STS_ZH = add_rank(DATA_OVERALL_ZH[["Model"] + TASK_LIST_STS_ZH]) - DATA_STS_ZH = DATA_STS_ZH[DATA_STS_ZH.iloc[:, 2:].ne("").any(axis=1)] - - # Fill NaN after averaging - DATA_OVERALL_ZH.fillna("", inplace=True) - - DATA_OVERALL_ZH = DATA_OVERALL_ZH[["Rank", "Model", "Model Size (GB)", "Embedding Dimensions", "Sequence Length", f"Average ({len(TASK_LIST_ZH)} datasets)", f"Classification Average ({len(TASK_LIST_CLASSIFICATION_ZH)} datasets)", f"Clustering Average ({len(TASK_LIST_CLUSTERING_ZH)} datasets)", f"Pair Classification Average ({len(TASK_LIST_PAIR_CLASSIFICATION_ZH)} datasets)", f"Reranking Average ({len(TASK_LIST_RERANKING_ZH)} datasets)", f"Retrieval Average ({len(TASK_LIST_RETRIEVAL_ZH)} datasets)", f"STS Average ({len(TASK_LIST_STS_ZH)} datasets)"]] - DATA_OVERALL_ZH = DATA_OVERALL_ZH[DATA_OVERALL_ZH.iloc[:, 5:].ne("").any(axis=1)] - - return DATA_OVERALL_ZH - -def get_mteb_average_pl(): - global DATA_OVERALL_PL, DATA_CLASSIFICATION_PL, DATA_CLUSTERING_PL, DATA_PAIR_CLASSIFICATION_PL, DATA_RETRIEVAL_PL, DATA_STS_PL - DATA_OVERALL_PL = get_mteb_data( - tasks=[ - "Classification", - "Clustering", - "PairClassification", - "Retrieval", - "STS", - ], - datasets=TASK_LIST_CLASSIFICATION_PL + TASK_LIST_CLUSTERING_PL + TASK_LIST_PAIR_CLASSIFICATION_PL + TASK_LIST_RETRIEVAL_PL + TASK_LIST_STS_PL, - fillna=False, - add_emb_dim=True, - rank=False, - ) - # Debugging: - # DATA_OVERALL_PL.to_csv("overall.csv") - - DATA_OVERALL_PL.insert(1, f"Average ({len(TASK_LIST_PL)} datasets)", DATA_OVERALL_PL[TASK_LIST_PL].mean(axis=1, skipna=False)) - DATA_OVERALL_PL.insert(2, f"Classification Average ({len(TASK_LIST_CLASSIFICATION_PL)} datasets)", DATA_OVERALL_PL[TASK_LIST_CLASSIFICATION_PL].mean(axis=1, skipna=False)) - DATA_OVERALL_PL.insert(3, f"Clustering Average ({len(TASK_LIST_CLUSTERING_PL)} datasets)", DATA_OVERALL_PL[TASK_LIST_CLUSTERING_PL].mean(axis=1, skipna=False)) - DATA_OVERALL_PL.insert(4, f"Pair Classification Average ({len(TASK_LIST_PAIR_CLASSIFICATION_PL)} datasets)", DATA_OVERALL_PL[TASK_LIST_PAIR_CLASSIFICATION_PL].mean(axis=1, skipna=False)) - DATA_OVERALL_PL.insert(5, f"Retrieval Average ({len(TASK_LIST_RETRIEVAL_PL)} datasets)", DATA_OVERALL_PL[TASK_LIST_RETRIEVAL_PL].mean(axis=1, skipna=False)) - DATA_OVERALL_PL.insert(6, f"STS Average ({len(TASK_LIST_STS_PL)} datasets)", DATA_OVERALL_PL[TASK_LIST_STS_PL].mean(axis=1, skipna=False)) - DATA_OVERALL_PL.sort_values(f"Average ({len(TASK_LIST_PL)} datasets)", ascending=False, inplace=True) - # Start ranking from 1 - DATA_OVERALL_PL.insert(0, "Rank", list(range(1, len(DATA_OVERALL_PL) + 1))) - - DATA_OVERALL_PL = DATA_OVERALL_PL.round(2) - 
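    # Note: `mean(axis=1, skipna=False)` above only yields an average when a model has a
    # score for every dataset in the list; a single missing dataset gives NaN, which the
    # later `fillna("")` renders as an empty cell. A minimal pandas illustration of that
    # behaviour (illustrative only, not part of the app):
    #
    #   import pandas as pd
    #   scores = pd.DataFrame({"ds_a": [1.0, 2.0], "ds_b": [3.0, None]})
    #   scores.mean(axis=1, skipna=False)  # -> 2.0 for the complete row, NaN for the row with a gap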
- DATA_CLASSIFICATION_PL = add_rank(DATA_OVERALL_PL[["Model"] + TASK_LIST_CLASSIFICATION_PL]) - # Only keep rows with at least one score in addition to the "Model" & rank column - DATA_CLASSIFICATION_PL = DATA_CLASSIFICATION_PL[DATA_CLASSIFICATION_PL.iloc[:, 2:].ne("").any(axis=1)] - - DATA_CLUSTERING_PL = add_rank(DATA_OVERALL_PL[["Model"] + TASK_LIST_CLUSTERING_PL]) - DATA_CLUSTERING_PL = DATA_CLUSTERING_PL[DATA_CLUSTERING_PL.iloc[:, 2:].ne("").any(axis=1)] - - DATA_PAIR_CLASSIFICATION_PL = add_rank(DATA_OVERALL_PL[["Model"] + TASK_LIST_PAIR_CLASSIFICATION_PL]) - DATA_PAIR_CLASSIFICATION_PL = DATA_PAIR_CLASSIFICATION_PL[DATA_PAIR_CLASSIFICATION_PL.iloc[:, 2:].ne("").any(axis=1)] - - DATA_RETRIEVAL_PL = add_rank(DATA_OVERALL_PL[["Model"] + TASK_LIST_RETRIEVAL_PL]) - DATA_RETRIEVAL_PL = DATA_RETRIEVAL_PL[DATA_RETRIEVAL_PL.iloc[:, 2:].ne("").any(axis=1)] - - DATA_STS_PL = add_rank(DATA_OVERALL_PL[["Model"] + TASK_LIST_STS_PL]) - DATA_STS_PL = DATA_STS_PL[DATA_STS_PL.iloc[:, 2:].ne("").any(axis=1)] - - # Fill NaN after averaging - DATA_OVERALL_PL.fillna("", inplace=True) - - DATA_OVERALL_PL = DATA_OVERALL_PL[["Rank", "Model", "Model Size (GB)", "Embedding Dimensions", "Sequence Length", f"Average ({len(TASK_LIST_PL)} datasets)", f"Classification Average ({len(TASK_LIST_CLASSIFICATION_PL)} datasets)", f"Clustering Average ({len(TASK_LIST_CLUSTERING_PL)} datasets)", f"Pair Classification Average ({len(TASK_LIST_PAIR_CLASSIFICATION_PL)} datasets)", f"Retrieval Average ({len(TASK_LIST_RETRIEVAL_PL)} datasets)", f"STS Average ({len(TASK_LIST_STS_PL)} datasets)"]] - DATA_OVERALL_PL = DATA_OVERALL_PL[DATA_OVERALL_PL.iloc[:, 5:].ne("").any(axis=1)] - - return DATA_OVERALL_PL - -get_mteb_average() -get_mteb_average_pl() -get_mteb_average_zh() -DATA_BITEXT_MINING = get_mteb_data(["BitextMining"], [], TASK_LIST_BITEXT_MINING) -DATA_BITEXT_MINING_OTHER = get_mteb_data(["BitextMining"], [], TASK_LIST_BITEXT_MINING_OTHER) -DATA_CLASSIFICATION_DA = get_mteb_data(["Classification"], [], TASK_LIST_CLASSIFICATION_DA) -DATA_CLASSIFICATION_NB = get_mteb_data(["Classification"], [], TASK_LIST_CLASSIFICATION_NB) -DATA_CLASSIFICATION_SV = get_mteb_data(["Classification"], [], TASK_LIST_CLASSIFICATION_SV) -DATA_CLASSIFICATION_OTHER = get_mteb_data(["Classification"], [], TASK_LIST_CLASSIFICATION_OTHER) -DATA_CLUSTERING_DE = get_mteb_data(["Clustering"], [], TASK_LIST_CLUSTERING_DE) -DATA_STS_OTHER = get_mteb_data(["STS"], [], TASK_LIST_STS_OTHER) - -# Exact, add all non-nan integer values for every dataset -NUM_SCORES = 0 -DATASETS = [] -MODELS = [] -# LANGUAGES = [] -for d in [ - DATA_BITEXT_MINING, - DATA_BITEXT_MINING_OTHER, - DATA_CLASSIFICATION_EN, - DATA_CLASSIFICATION_DA, - DATA_CLASSIFICATION_NB, - DATA_CLASSIFICATION_PL, - DATA_CLASSIFICATION_SV, - DATA_CLASSIFICATION_ZH, - DATA_CLASSIFICATION_OTHER, - DATA_CLUSTERING, - DATA_CLUSTERING_DE, - DATA_CLUSTERING_PL, - DATA_CLUSTERING_ZH, - DATA_PAIR_CLASSIFICATION, - DATA_PAIR_CLASSIFICATION_PL, - DATA_PAIR_CLASSIFICATION_ZH, - DATA_RERANKING, - DATA_RERANKING_ZH, - DATA_RETRIEVAL, - DATA_RETRIEVAL_PL, - DATA_RETRIEVAL_ZH, - DATA_STS_EN, - DATA_STS_PL, - DATA_STS_ZH, - DATA_STS_OTHER, - DATA_SUMMARIZATION, -]: - # NUM_SCORES += d.iloc[:, 1:].apply(lambda x: sum([1 for y in x if isinstance(y, float) and not np.isnan(y)]), axis=1).sum() - cols_to_ignore = 3 if "Average" in d.columns else 2 - # Count number of scores including only non-nan floats & excluding the rank column - NUM_SCORES += d.iloc[:, cols_to_ignore:].notna().sum().sum() - # Exclude rank & model 
name column (first two); Do not count different language versions as different datasets - DATASETS += [i.split(" ")[0] for i in d.columns[cols_to_ignore:]] - # LANGUAGES += [i.split(" ")[-1] for i in d.columns[cols_to_ignore:]] - MODELS += d["Model"].tolist() - -NUM_DATASETS = len(set(DATASETS)) -# NUM_LANGUAGES = len(set(LANGUAGES)) -NUM_MODELS = len(set(MODELS)) - -block = gr.Blocks() -with block: - gr.Markdown(f""" - Massive Text Embedding Benchmark (MTEB) Leaderboard. To submit, refer to the MTEB GitHub repository 🤗 Refer to the [MTEB paper](https://arxiv.org/abs/2210.07316) for details on metrics, tasks and models. - - - **Total Datasets**: {NUM_DATASETS} - - **Total Languages**: 113 - - **Total Scores**: {NUM_SCORES} - - **Total Models**: {NUM_MODELS} - """) - with gr.Tabs(): - with gr.TabItem("Overall"): - with gr.TabItem("English"): - with gr.Row(): - gr.Markdown(""" - **Overall MTEB English leaderboard 🔮** - - - **Metric:** Various, refer to task tabs - - **Languages:** English - """) - with gr.Row(): - data_overall = gr.components.Dataframe( - DATA_OVERALL, - datatype=["number", "markdown"] + ["number"] * len(DATA_OVERALL.columns), - type="pandas", - wrap=True, - ) - with gr.Row(): - data_run_overall = gr.Button("Refresh") - data_run_overall.click(get_mteb_average, inputs=None, outputs=data_overall) - with gr.TabItem("Chinese"): - with gr.Row(): - gr.Markdown(""" - **Overall MTEB Chinese leaderboard (C-MTEB) 🔮🇨🇳** - - - **Metric:** Various, refer to task tabs - - **Languages:** Chinese - - **Credits:** [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding) - """) - with gr.Row(): - data_overall_zh = gr.components.Dataframe( - DATA_OVERALL_ZH, - datatype=["number", "markdown"] + ["number"] * len(DATA_OVERALL_ZH.columns), - type="pandas", - wrap=True, - ) - with gr.Row(): - data_run_overall_zh = gr.Button("Refresh") - data_run_overall_zh.click(get_mteb_average_zh, inputs=None, outputs=data_overall_zh) - with gr.TabItem("Polish"): - with gr.Row(): - gr.Markdown(""" - **Overall MTEB Polish leaderboard (PL-MTEB) 🔮🇵🇱** - - - **Metric:** Various, refer to task tabs - - **Languages:** Polish - - **Credits:** [Rafał Poświata](https://github.com/rafalposwiata), [Konrad Wojtasik](https://github.com/kwojtasi) & [BEIR-PL](https://arxiv.org/abs/2305.19840) - """) - with gr.Row(): - data_overall_pl = gr.components.Dataframe( - DATA_OVERALL_PL, - datatype=["number", "markdown"] + ["number"] * len(DATA_OVERALL_PL.columns), - type="pandas", - wrap=True, - ) - with gr.Row(): - data_run_overall_pl = gr.Button("Refresh") - data_run_overall_pl.click(get_mteb_average_pl, inputs=None, outputs=data_overall_pl) - with gr.TabItem("Bitext Mining"): - with gr.TabItem("English-X"): - with gr.Row(): - gr.Markdown(""" - **Bitext Mining English-X Leaderboard 🎌** - - - **Metric:** [F1](https://huggingface.co/spaces/evaluate-metric/f1) - - **Languages:** 117 (Pairs of: English & other language) - """) - with gr.Row(): - data_bitext_mining = gr.components.Dataframe( - DATA_BITEXT_MINING, - datatype=["number", "markdown"] + ["number"] * len(DATA_BITEXT_MINING.columns), - type="pandas", - ) - with gr.Row(): - data_run_bitext_mining = gr.Button("Refresh") - task_bitext_mining = gr.Variable(value=["BitextMining"]) - lang_bitext_mining = gr.Variable(value=[]) - datasets_bitext_mining = gr.Variable(value=TASK_LIST_BITEXT_MINING) - data_run_bitext_mining.click( - get_mteb_data, - inputs=[task_bitext_mining, lang_bitext_mining, datasets_bitext_mining], - outputs=data_bitext_mining, - ) - with gr.TabItem("Danish"): - 
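                        # This "Danish" bitext tab is backed by the catch-all DATA_BITEXT_MINING_OTHER
                        # table and TASK_LIST_BITEXT_MINING_OTHER dataset list (see the wiring below),
                        # which here covers the Danish & Bornholmsk bitext pairs described in the tab text.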
with gr.Row(): - gr.Markdown(""" - **Bitext Mining Danish Leaderboard 🎌🇩🇰** - - - **Metric:** [F1](https://huggingface.co/spaces/evaluate-metric/f1) - - **Languages:** Danish & Bornholmsk (Danish Dialect) - - **Credits:** [Kenneth Enevoldsen](https://github.com/KennethEnevoldsen), [scandinavian-embedding-benchmark](https://kennethenevoldsen.github.io/scandinavian-embedding-benchmark/) - """) - with gr.Row(): - data_bitext_mining_da = gr.components.Dataframe( - DATA_BITEXT_MINING_OTHER, - datatype=["number", "markdown"] + ["number"] * len(DATA_BITEXT_MINING_OTHER.columns), - type="pandas", - ) - with gr.Row(): - data_run_bitext_mining_da = gr.Button("Refresh") - task_bitext_mining_da = gr.Variable(value=["BitextMining"]) - lang_bitext_mining_da = gr.Variable(value=[]) - datasets_bitext_mining_da = gr.Variable(value=TASK_LIST_BITEXT_MINING_OTHER) - data_run_bitext_mining_da.click( - get_mteb_data, - inputs=[ - task_bitext_mining_da, - lang_bitext_mining_da, - datasets_bitext_mining_da, - ], - outputs=data_bitext_mining_da, - ) - with gr.TabItem("Classification"): - with gr.TabItem("English"): - with gr.Row(): - gr.Markdown(""" - **Classification English Leaderboard ❤️** - - - **Metric:** [Accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) - - **Languages:** English - """) - with gr.Row(): - data_classification_en = gr.components.Dataframe( - DATA_CLASSIFICATION_EN, - datatype=["number", "markdown"] + ["number"] * len(DATA_CLASSIFICATION_EN.columns), - type="pandas", - ) - with gr.Row(): - data_run_classification_en = gr.Button("Refresh") - task_classification_en = gr.Variable(value=["Classification"]) - lang_classification_en = gr.Variable(value=["en"]) - data_run_classification_en.click( - get_mteb_data, - inputs=[ - task_classification_en, - lang_classification_en, - ], - outputs=data_classification_en, - ) - with gr.TabItem("Chinese"): - with gr.Row(): - gr.Markdown(""" - **Classification Chinese Leaderboard 🧡🇨🇳** - - - **Metric:** [Accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) - - **Languages:** Chinese - - **Credits:** [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding) - """) - with gr.Row(): - data_classification_zh = gr.components.Dataframe( - DATA_CLASSIFICATION_ZH, - datatype=["number", "markdown"] + ["number"] * len(DATA_CLASSIFICATION_ZH.columns), - type="pandas", - ) - with gr.Row(): - data_run_classification_zh = gr.Button("Refresh") - task_classification_zh = gr.Variable(value=["Classification"]) - lang_classification_zh = gr.Variable([]) - datasets_classification_zh = gr.Variable(value=TASK_LIST_CLASSIFICATION_ZH) - data_run_classification_zh.click( - get_mteb_data, - inputs=[ - task_classification_zh, - lang_classification_zh, - datasets_classification_zh, - ], - outputs=data_classification_zh, - ) - with gr.TabItem("Danish"): - with gr.Row(): - gr.Markdown(""" - **Classification Danish Leaderboard 🤍🇩🇰** - - - **Metric:** [Accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) - - **Languages:** Danish - - **Credits:** [Kenneth Enevoldsen](https://github.com/KennethEnevoldsen), [scandinavian-embedding-benchmark](https://kennethenevoldsen.github.io/scandinavian-embedding-benchmark/) - """) - with gr.Row(): - data_classification_da = gr.components.Dataframe( - DATA_CLASSIFICATION_DA, - datatype=["number", "markdown"] + ["number"] * len(DATA_CLASSIFICATION_DA.columns), - type="pandas", - ) - with gr.Row(): - data_run_classification_da = gr.Button("Refresh") - task_classification_da = gr.Variable(value=["Classification"]) - 
lang_classification_da = gr.Variable(value=[]) - datasets_classification_da = gr.Variable(value=TASK_LIST_CLASSIFICATION_DA) - data_run_classification_da.click( - get_mteb_data, - inputs=[ - task_classification_da, - lang_classification_da, - datasets_classification_da, - ], - outputs=data_classification_da, - ) - with gr.TabItem("Norwegian"): - with gr.Row(): - gr.Markdown(""" - **Classification Norwegian Leaderboard 💙🇳🇴** - - - **Metric:** [Accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) - - **Languages:** Norwegian Bokmål - - **Credits:** [Kenneth Enevoldsen](https://github.com/KennethEnevoldsen), [scandinavian-embedding-benchmark](https://kennethenevoldsen.github.io/scandinavian-embedding-benchmark/) - """) - with gr.Row(): - data_classification_nb = gr.components.Dataframe( - DATA_CLASSIFICATION_NB, - datatype=["number", "markdown"] + ["number"] * len(DATA_CLASSIFICATION_NB.columns), - type="pandas", - ) - with gr.Row(): - data_run_classification_nb = gr.Button("Refresh") - task_classification_nb = gr.Variable(value=["Classification"]) - lang_classification_nb = gr.Variable(value=[]) - datasets_classification_nb = gr.Variable(value=TASK_LIST_CLASSIFICATION_NB) - data_run_classification_nb.click( - get_mteb_data, - inputs=[ - task_classification_nb, - lang_classification_nb, - datasets_classification_nb, - ], - outputs=data_classification_nb, - ) - with gr.TabItem("Polish"): - with gr.Row(): - gr.Markdown(""" - **Classification Polish Leaderboard 🤍🇵🇱** - - - **Metric:** [Accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) - - **Languages:** Polish - - **Credits:** [Rafał Poświata](https://github.com/rafalposwiata) - """) - with gr.Row(): - data_classification_pl = gr.components.Dataframe( - DATA_CLASSIFICATION_PL, - datatype=["number", "markdown"] + ["number"] * len(DATA_CLASSIFICATION_PL.columns), - type="pandas", - ) - with gr.Row(): - data_run_classification_pl = gr.Button("Refresh") - task_classification_pl = gr.Variable(value=["Classification"]) - lang_classification_pl = gr.Variable(value=[]) - datasets_classification_pl = gr.Variable(value=TASK_LIST_CLASSIFICATION_PL) - data_run_classification_pl.click( - get_mteb_data, - inputs=[ - task_classification_pl, - lang_classification_pl, - datasets_classification_pl, - ], - outputs=data_classification_pl, - ) - with gr.TabItem("Swedish"): - with gr.Row(): - gr.Markdown(""" - **Classification Swedish Leaderboard 💛🇸🇪** - - - **Metric:** [Accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) - - **Languages:** Swedish - - **Credits:** [Kenneth Enevoldsen](https://github.com/KennethEnevoldsen), [scandinavian-embedding-benchmark](https://kennethenevoldsen.github.io/scandinavian-embedding-benchmark/) - """) - with gr.Row(): - data_classification_sv = gr.components.Dataframe( - DATA_CLASSIFICATION_SV, - datatype=["number", "markdown"] + ["number"] * len(DATA_CLASSIFICATION_SV.columns), - type="pandas", - ) - with gr.Row(): - data_run_classification_sv = gr.Button("Refresh") - task_classification_sv = gr.Variable(value=["Classification"]) - lang_classification_sv = gr.Variable(value=[]) - datasets_classification_sv = gr.Variable(value=TASK_LIST_CLASSIFICATION_SV) - data_run_classification_sv.click( - get_mteb_data, - inputs=[ - task_classification_sv, - lang_classification_sv, - datasets_classification_sv, - ], - outputs=data_classification_sv, - ) - with gr.TabItem("Other"): - with gr.Row(): - gr.Markdown(""" - **Classification Other Languages Leaderboard 💜💚💙** - - - **Metric:** 
[Accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) - - **Languages:** 47 (Only languages not included in the other tabs) - """) - with gr.Row(): - data_classification = gr.components.Dataframe( - DATA_CLASSIFICATION_OTHER, - datatype=["number", "markdown"] + ["number"] * len(DATA_CLASSIFICATION_OTHER) * 10, - type="pandas", - ) - with gr.Row(): - data_run_classification = gr.Button("Refresh") - task_classification = gr.Variable(value=["Classification"]) - lang_classification = gr.Variable(value=[]) - datasets_classification = gr.Variable(value=TASK_LIST_CLASSIFICATION_OTHER) - data_run_classification.click( - get_mteb_data, - inputs=[ - task_classification, - lang_classification, - datasets_classification, - ], - outputs=data_classification, - ) - with gr.TabItem("Clustering"): - with gr.TabItem("English"): - with gr.Row(): - gr.Markdown(""" - **Clustering Leaderboard ✨** - - - **Metric:** Validity Measure (v_measure) - - **Languages:** English - """) - with gr.Row(): - data_clustering = gr.components.Dataframe( - DATA_CLUSTERING, - datatype=["number", "markdown"] + ["number"] * len(DATA_CLUSTERING.columns), - type="pandas", - ) - with gr.Row(): - data_run_clustering_en = gr.Button("Refresh") - task_clustering = gr.Variable(value=["Clustering"]) - lang_clustering = gr.Variable(value=[]) - datasets_clustering = gr.Variable(value=TASK_LIST_CLUSTERING) - data_run_clustering_en.click( - get_mteb_data, - inputs=[task_clustering, lang_clustering, datasets_clustering], - outputs=data_clustering, - ) - with gr.TabItem("Chinese"): - with gr.Row(): - gr.Markdown(""" - **Clustering Chinese Leaderboard ✨🇨🇳** - - - **Metric:** Validity Measure (v_measure) - - **Languages:** Chinese - - **Credits:** [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding) - """) - with gr.Row(): - data_clustering_zh = gr.components.Dataframe( - DATA_CLUSTERING_ZH, - datatype=["number", "markdown"] + ["number"] * len(DATA_CLUSTERING_ZH.columns), - type="pandas", - ) - with gr.Row(): - data_run_clustering_zh = gr.Button("Refresh") - task_clustering_zh = gr.Variable(value=["Clustering"]) - lang_clustering_zh = gr.Variable(value=[]) - datasets_clustering_zh = gr.Variable(value=TASK_LIST_CLUSTERING_ZH) - data_run_clustering_zh.click( - get_mteb_data, - inputs=[task_clustering_zh, lang_clustering_zh, datasets_clustering_zh], - outputs=data_clustering_zh, - ) - with gr.TabItem("German"): - with gr.Row(): - gr.Markdown(""" - **Clustering German Leaderboard ✨🇩🇪** - - - **Metric:** Validity Measure (v_measure) - - **Languages:** German - - **Credits:** [Silvan](https://github.com/slvnwhrl) - """) - with gr.Row(): - data_clustering_de = gr.components.Dataframe( - DATA_CLUSTERING_DE, - datatype=["number", "markdown"] + ["number"] * len(DATA_CLUSTERING_DE.columns) * 2, - type="pandas", - ) - with gr.Row(): - data_run_clustering_de = gr.Button("Refresh") - task_clustering_de = gr.Variable(value=["Clustering"]) - lang_clustering_de = gr.Variable(value=[]) - datasets_clustering_de = gr.Variable(value=TASK_LIST_CLUSTERING_DE) - data_run_clustering_de.click( - get_mteb_data, - inputs=[task_clustering_de, lang_clustering_de, datasets_clustering_de], - outputs=data_clustering_de, - ) - with gr.TabItem("Polish"): - with gr.Row(): - gr.Markdown(""" - **Clustering Polish Leaderboard ✨🇵🇱** - - - **Metric:** Validity Measure (v_measure) - - **Languages:** Polish - - **Credits:** [Rafał Poświata](https://github.com/rafalposwiata) - """) - with gr.Row(): - data_clustering_pl = gr.components.Dataframe( - DATA_CLUSTERING_PL, - 
datatype=["number", "markdown"] + ["number"] * len(DATA_CLUSTERING_PL.columns) * 2, - type="pandas", - ) - with gr.Row(): - data_run_clustering_pl = gr.Button("Refresh") - task_clustering_pl = gr.Variable(value=["Clustering"]) - lang_clustering_pl = gr.Variable(value=[]) - datasets_clustering_pl = gr.Variable(value=TASK_LIST_CLUSTERING_PL) - data_run_clustering_pl.click( - get_mteb_data, - inputs=[task_clustering_pl, lang_clustering_pl, datasets_clustering_pl], - outputs=data_clustering_pl, - ) - with gr.TabItem("Pair Classification"): - with gr.TabItem("English"): - with gr.Row(): - gr.Markdown(""" - **Pair Classification English Leaderboard 🎭** - - - **Metric:** Average Precision based on Cosine Similarities (cos_sim_ap) - - **Languages:** English - """) - with gr.Row(): - data_pair_classification = gr.components.Dataframe( - DATA_PAIR_CLASSIFICATION, - datatype=["number", "markdown"] + ["number"] * len(DATA_PAIR_CLASSIFICATION.columns), - type="pandas", - ) - with gr.Row(): - data_run_pair_classification = gr.Button("Refresh") - task_pair_classification = gr.Variable(value=["PairClassification"]) - lang_pair_classification = gr.Variable(value=[]) - datasets_pair_classification = gr.Variable(value=TASK_LIST_PAIR_CLASSIFICATION) - data_run_pair_classification.click( - get_mteb_data, - inputs=[ - task_pair_classification, - lang_pair_classification, - datasets_pair_classification, - ], - outputs=data_pair_classification, - ) - with gr.TabItem("Chinese"): - with gr.Row(): - gr.Markdown(""" - **Pair Classification Chinese Leaderboard 🎭🇨🇳** - - - **Metric:** Average Precision based on Cosine Similarities (cos_sim_ap) - - **Languages:** Chinese - - **Credits:** [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding) - """) - with gr.Row(): - data_pair_classification_zh = gr.components.Dataframe( - DATA_PAIR_CLASSIFICATION_ZH, - datatype=["number", "markdown"] + ["number"] * len(DATA_PAIR_CLASSIFICATION_ZH.columns), - type="pandas", - ) - with gr.Row(): - data_run = gr.Button("Refresh") - task_pair_classification_zh = gr.Variable(value=["PairClassification"]) - lang_pair_classification_zh = gr.Variable(value=[]) - datasets_pair_classification_zh = gr.Variable(value=TASK_LIST_PAIR_CLASSIFICATION_ZH) - data_run_classification_zh.click( - get_mteb_data, - inputs=[ - task_pair_classification_zh, - lang_pair_classification_zh, - datasets_pair_classification_zh, - ], - outputs=data_pair_classification_zh, - ) - with gr.TabItem("Polish"): - with gr.Row(): - gr.Markdown(""" - **Pair Classification Chinese Leaderboard 🎭🇵🇱** - - - **Metric:** Average Precision based on Cosine Similarities (cos_sim_ap) - - **Languages:** Polish - - **Credits:** [Rafał Poświata](https://github.com/rafalposwiata) - """) - with gr.Row(): - data_pair_classification_pl = gr.components.Dataframe( - DATA_PAIR_CLASSIFICATION_PL, - datatype=["number", "markdown"] + ["number"] * len(DATA_PAIR_CLASSIFICATION_PL.columns), - type="pandas", - ) - with gr.Row(): - data_run = gr.Button("Refresh") - task_pair_classification_pl = gr.Variable(value=["PairClassification"]) - lang_pair_classification_pl = gr.Variable(value=[]) - datasets_pair_classification_pl = gr.Variable(value=TASK_LIST_PAIR_CLASSIFICATION_PL) - data_run_classification_pl.click( - get_mteb_data, - inputs=[ - task_pair_classification_pl, - lang_pair_classification_pl, - datasets_pair_classification_pl, - ], - outputs=data_pair_classification_pl, - ) - with gr.TabItem("Reranking"): - with gr.TabItem("English"): - with gr.Row(): - gr.Markdown(""" - **Reranking English 
Leaderboard 🥈** - - - **Metric:** Mean Average Precision (MAP) - - **Languages:** English - """) - with gr.Row(): - data_reranking = gr.components.Dataframe( - DATA_RERANKING, - datatype=["number", "markdown"] + ["number"] * len(DATA_RERANKING.columns), - type="pandas", - ) - with gr.Row(): - data_run_reranking = gr.Button("Refresh") - task_reranking = gr.Variable(value=["Reranking"]) - lang_reranking = gr.Variable(value=[]) - datasets_reranking = gr.Variable(value=TASK_LIST_RERANKING) - data_run_reranking.click( - get_mteb_data, - inputs=[ - task_reranking, - lang_reranking, - datasets_reranking, - ], - outputs=data_reranking - ) - with gr.TabItem("Chinese"): - with gr.Row(): - gr.Markdown(""" - **Reranking Chinese Leaderboard 🥈🇨🇳** - - - **Metric:** Mean Average Precision (MAP) - - **Languages:** Chinese - - **Credits:** [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding) - """) - with gr.Row(): - data_reranking_zh = gr.components.Dataframe( - DATA_RERANKING_ZH, - datatype=["number", "markdown"] + ["number"] * len(DATA_RERANKING_ZH.columns), - type="pandas", - ) - with gr.Row(): - data_run_reranking_zh = gr.Button("Refresh") - task_reranking_zh = gr.Variable(value=["Reranking"]) - lang_reranking_zh = gr.Variable(value=[]) - datasets_reranking_zh = gr.Variable(value=TASK_LIST_RERANKING_ZH) - data_run_reranking_zh.click( - get_mteb_data, - inputs=[task_reranking_zh, lang_reranking_zh, datasets_reranking_zh], - outputs=data_reranking_zh, - ) - with gr.TabItem("Retrieval"): - with gr.TabItem("English"): - with gr.Row(): - gr.Markdown(""" - **Retrieval English Leaderboard 🔎** - - - **Metric:** Normalized Discounted Cumulative Gain @ k (ndcg_at_10) - - **Languages:** English - """) - with gr.Row(): - data_retrieval = gr.components.Dataframe( - DATA_RETRIEVAL, - # Add support for more columns than existing as a buffer for CQADupstack & other Retrieval tasks (e.g. MSMARCOv2) - datatype=["number", "markdown"] + ["number"] * len(DATA_RETRIEVAL.columns) * 2, - type="pandas", - ) - with gr.Row(): - data_run_retrieval = gr.Button("Refresh") - task_retrieval = gr.Variable(value=["Retrieval"]) - lang_retrieval = gr.Variable(value=[]) - datasets_retrieval = gr.Variable(value=TASK_LIST_RETRIEVAL) - data_run_retrieval.click( - get_mteb_data, - inputs=[ - task_retrieval, - lang_retrieval, - datasets_retrieval, - ], - outputs=data_retrieval - ) - with gr.TabItem("Chinese"): - with gr.Row(): - gr.Markdown(""" - **Retrieval Chinese Leaderboard 🔎🇨🇳** - - - **Metric:** Normalized Discounted Cumulative Gain @ k (ndcg_at_10) - - **Languages:** Chinese - - **Credits:** [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding) - """) - with gr.Row(): - data_retrieval_zh = gr.components.Dataframe( - DATA_RETRIEVAL_ZH, - # Add support for more columns than existing as a buffer for CQADupstack & other Retrieval tasks (e.g. 
MSMARCOv2) - datatype=["number", "markdown"] + ["number"] * len(DATA_RETRIEVAL_ZH.columns) * 2, - type="pandas", - ) - with gr.Row(): - data_run_retrieval_zh = gr.Button("Refresh") - task_retrieval_zh = gr.Variable(value=["Retrieval"]) - lang_retrieval_zh = gr.Variable(value=[]) - datasets_retrieval_zh = gr.Variable(value=TASK_LIST_RETRIEVAL_ZH) - data_run_retrieval_zh.click( - get_mteb_data, - inputs=[task_retrieval_zh, lang_retrieval_zh, datasets_retrieval_zh], - outputs=data_retrieval_zh, - ) - with gr.TabItem("Polish"): - with gr.Row(): - gr.Markdown(""" - **Retrieval Polish Leaderboard 🔎🇵🇱** - - - **Metric:** Normalized Discounted Cumulative Gain @ k (ndcg_at_10) - - **Languages:** Polish - - **Credits:** [Konrad Wojtasik](https://github.com/kwojtasi) & [BEIR-PL](https://arxiv.org/abs/2305.19840) - """) - with gr.Row(): - data_retrieval_pl = gr.components.Dataframe( - DATA_RETRIEVAL_PL, - # Add support for more columns than existing as a buffer for CQADupstack & other Retrieval tasks (e.g. MSMARCOv2) - datatype=["number", "markdown"] + ["number"] * len(DATA_RETRIEVAL_PL.columns) * 2, - type="pandas", - ) - with gr.Row(): - data_run_retrieval_pl = gr.Button("Refresh") - task_retrieval_pl = gr.Variable(value=["Retrieval"]) - lang_retrieval_pl = gr.Variable(value=[]) - datasets_retrieval_pl = gr.Variable(value=TASK_LIST_RETRIEVAL_PL) - data_run_retrieval_pl.click( - get_mteb_data, - inputs=[task_retrieval_pl, lang_retrieval_pl, datasets_retrieval_pl], - outputs=data_retrieval_pl - ) - with gr.TabItem("STS"): - with gr.TabItem("English"): - with gr.Row(): - gr.Markdown(""" - **STS English Leaderboard 🤖** - - - **Metric:** Spearman correlation based on cosine similarity - - **Languages:** English - """) - with gr.Row(): - data_sts_en = gr.components.Dataframe( - DATA_STS_EN, - datatype=["number", "markdown"] + ["number"] * len(DATA_STS_EN.columns), - type="pandas", - ) - with gr.Row(): - data_run_sts_en = gr.Button("Refresh") - task_sts_en = gr.Variable(value=["STS"]) - lang_sts_en = gr.Variable(value=[]) - datasets_sts_en = gr.Variable(value=TASK_LIST_STS) - data_run_sts_en.click( - get_mteb_data, - inputs=[task_sts_en, lang_sts_en, datasets_sts_en], - outputs=data_sts_en, - ) - with gr.TabItem("Chinese"): - with gr.Row(): - gr.Markdown(""" - **STS Chinese Leaderboard 🤖🇨🇳** - - - **Metric:** Spearman correlation based on cosine similarity - - **Languages:** Chinese - - **Credits:** [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding) - """) - with gr.Row(): - data_sts_zh = gr.components.Dataframe( - DATA_STS_ZH, - datatype=["number", "markdown"] + ["number"] * len(DATA_STS_ZH.columns), - type="pandas", - ) - with gr.Row(): - data_run_sts_zh = gr.Button("Refresh") - task_sts_zh = gr.Variable(value=["STS"]) - lang_sts_zh = gr.Variable(value=[]) - datasets_sts_zh = gr.Variable(value=TASK_LIST_STS_ZH) - data_run_sts_zh.click( - get_mteb_data, - inputs=[task_sts_zh, lang_sts_zh, datasets_sts_zh], - outputs=data_sts_zh, - ) - with gr.TabItem("Polish"): - with gr.Row(): - gr.Markdown(""" - **STS Polish Leaderboard 🤖🇵🇱** - - - **Metric:** Spearman correlation based on cosine similarity - - **Languages:** Polish - - **Credits:** [Rafał Poświata](https://github.com/rafalposwiata) - """) - with gr.Row(): - data_sts_pl = gr.components.Dataframe( - DATA_STS_PL, - datatype=["number", "markdown"] + ["number"] * len(DATA_STS_PL.columns), - type="pandas", - ) - with gr.Row(): - data_run_sts_pl = gr.Button("Refresh") - task_sts_pl = gr.Variable(value=["STS"]) - lang_sts_pl = gr.Variable(value=[]) - 
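                        # This refresh wiring follows the pattern used throughout the file: constant
                        # inputs live in gr.Variable holders so the Button can re-run get_mteb_data on
                        # click, either with a language filter (e.g. ["en"]) or, as in this tab, with an
                        # empty language list plus an explicit dataset list. Sketch of the pattern
                        # (names illustrative only):
                        #
                        #   refresh = gr.Button("Refresh")
                        #   task = gr.Variable(value=["STS"])
                        #   refresh.click(get_mteb_data, inputs=[task, langs, datasets], outputs=table)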
datasets_sts_pl = gr.Variable(value=TASK_LIST_STS_PL) - data_run_sts_pl.click( - get_mteb_data, - inputs=[task_sts_pl, lang_sts_pl, datasets_sts_pl], - outputs=data_sts_pl, - ) - with gr.TabItem("Other"): - with gr.Row(): - gr.Markdown(""" - **STS Other Leaderboard 👽** - - - **Metric:** Spearman correlation based on cosine similarity - - **Languages:** Arabic, Chinese, Dutch, English, French, German, Italian, Korean, Polish, Russian, Spanish (Only language combos not included in the other tabs) - """) - with gr.Row(): - data_sts_other = gr.components.Dataframe( - DATA_STS_OTHER, - datatype=["number", "markdown"] + ["number"] * len(DATA_STS_OTHER.columns) * 2, - type="pandas", - ) - with gr.Row(): - data_run_sts_other = gr.Button("Refresh") - task_sts_other = gr.Variable(value=["STS"]) - lang_sts_other = gr.Variable(value=[]) - datasets_sts_other = gr.Variable(value=TASK_LIST_STS_OTHER) - data_run_sts_other.click( - get_mteb_data, - inputs=[task_sts_other, lang_sts_other, task_sts_other, datasets_sts_other], - outputs=data_sts_other - ) - with gr.TabItem("Summarization"): - with gr.Row(): - gr.Markdown(""" - **Summarization Leaderboard 📜** - - - **Metric:** Spearman correlation based on cosine similarity - - **Languages:** English - """) - with gr.Row(): - data_summarization = gr.components.Dataframe( - DATA_SUMMARIZATION, - datatype=["number", "markdown"] + ["number"] * 2, - type="pandas", - ) - with gr.Row(): - data_run = gr.Button("Refresh") - task_summarization = gr.Variable(value=["Summarization"]) - data_run.click( - get_mteb_data, - inputs=[task_summarization], - outputs=data_summarization, - ) - gr.Markdown(r""" - - Made with ❤️ for NLP. If this work is useful to you, please consider citing: - - ```bibtex - @article{muennighoff2022mteb, - doi = {10.48550/ARXIV.2210.07316}, - url = {https://arxiv.org/abs/2210.07316}, - author = {Muennighoff, Niklas and Tazi, Nouamane and Magne, Lo{\"\i}c and Reimers, Nils}, - title = {MTEB: Massive Text Embedding Benchmark}, - publisher = {arXiv}, - journal={arXiv preprint arXiv:2210.07316}, - year = {2022} - } - ``` - """) - # Running the functions on page load in addition to when the button is clicked - # This is optional - If deactivated the data loaded at "Build time" is shown like for Overall tab - """ - block.load(get_mteb_data, inputs=[task_bitext_mining], outputs=data_bitext_mining) - """ - -block.queue(concurrency_count=40, max_size=10) -block.launch() - - -# Possible changes: -# Could check if tasks are valid (Currently users could just invent new tasks - similar for languages) -# Could make it load in the background without the Gradio logo closer to the Deep RL space -# Could add graphs / other visual content -# Could add verification marks - -# Sources: -# https://huggingface.co/spaces/gradio/leaderboard -# https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard -# https://getemoji.com/ diff --git a/spaces/abrar-lohia/text-2-character-anim/VQTrans/models/encdec.py b/spaces/abrar-lohia/text-2-character-anim/VQTrans/models/encdec.py deleted file mode 100644 index 2a2792b8c71611f4897a54bae860d0eb237c90f3..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/VQTrans/models/encdec.py +++ /dev/null @@ -1,67 +0,0 @@ -import torch.nn as nn -from resnet import Resnet1D - -class Encoder(nn.Module): - def __init__(self, - input_emb_width = 3, - output_emb_width = 512, - down_t = 3, - stride_t = 2, - width = 512, - depth = 3, - dilation_growth_rate = 3, - activation='relu', - 
norm=None): - super().__init__() - - blocks = [] - filter_t, pad_t = stride_t * 2, stride_t // 2 - blocks.append(nn.Conv1d(input_emb_width, width, 3, 1, 1)) - blocks.append(nn.ReLU()) - - for i in range(down_t): - input_dim = width - block = nn.Sequential( - nn.Conv1d(input_dim, width, filter_t, stride_t, pad_t), - Resnet1D(width, depth, dilation_growth_rate, activation=activation, norm=norm), - ) - blocks.append(block) - blocks.append(nn.Conv1d(width, output_emb_width, 3, 1, 1)) - self.model = nn.Sequential(*blocks) - - def forward(self, x): - return self.model(x) - -class Decoder(nn.Module): - def __init__(self, - input_emb_width = 3, - output_emb_width = 512, - down_t = 3, - stride_t = 2, - width = 512, - depth = 3, - dilation_growth_rate = 3, - activation='relu', - norm=None): - super().__init__() - blocks = [] - - filter_t, pad_t = stride_t * 2, stride_t // 2 - blocks.append(nn.Conv1d(output_emb_width, width, 3, 1, 1)) - blocks.append(nn.ReLU()) - for i in range(down_t): - out_dim = width - block = nn.Sequential( - Resnet1D(width, depth, dilation_growth_rate, reverse_dilation=True, activation=activation, norm=norm), - nn.Upsample(scale_factor=2, mode='nearest'), - nn.Conv1d(width, out_dim, 3, 1, 1) - ) - blocks.append(block) - blocks.append(nn.Conv1d(width, width, 3, 1, 1)) - blocks.append(nn.ReLU()) - blocks.append(nn.Conv1d(width, input_emb_width, 3, 1, 1)) - self.model = nn.Sequential(*blocks) - - def forward(self, x): - return self.model(x) - diff --git a/spaces/achyuth1344/stable-diffusion-webui/app.py b/spaces/achyuth1344/stable-diffusion-webui/app.py deleted file mode 100644 index c88475b09b7157ce54dc8289652a46d1f384097f..0000000000000000000000000000000000000000 --- a/spaces/achyuth1344/stable-diffusion-webui/app.py +++ /dev/null @@ -1,74 +0,0 @@ -import os -from subprocess import getoutput - -gpu_info = getoutput('nvidia-smi') -if("A10G" in gpu_info): - os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+4c06c79.d20221205-cp38-cp38-linux_x86_64.whl") -elif("T4" in gpu_info): - os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+1515f77.d20221130-cp38-cp38-linux_x86_64.whl") - -os.system(f"git clone -b v1.5 https://github.com/camenduru/stable-diffusion-webui /home/user/app/stable-diffusion-webui") -os.chdir("/home/user/app/stable-diffusion-webui") - -os.system(f"wget -q https://github.com/camenduru/webui/raw/main/env_patch.py -O /home/user/app/env_patch.py") -os.system(f"sed -i '$a fastapi==0.90.0' /home/user/app/stable-diffusion-webui/requirements_versions.txt") -os.system(f"sed -i -e '/import image_from_url_text/r /home/user/app/env_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/(modelmerger_interface, \"Checkpoint Merger\", \"modelmerger\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/(train_interface, \"Train\", \"ti\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/extensions_interface, \"Extensions\", \"extensions\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e '/settings_interface, \"Settings\", \"settings\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f'''sed -i -e "s/document.getElementsByTagName('gradio-app')\[0\].shadowRoot/!!document.getElementsByTagName('gradio-app')[0].shadowRoot ? 
document.getElementsByTagName('gradio-app')[0].shadowRoot : document/g" /home/user/app/stable-diffusion-webui/script.js''') -os.system(f"sed -i -e 's/ show_progress=False,/ show_progress=True,/g' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e 's/shared.demo.launch/shared.demo.queue().launch/g' /home/user/app/stable-diffusion-webui/webui.py") -os.system(f"sed -i -e 's/ outputs=\[/queue=False, &/g' /home/user/app/stable-diffusion-webui/modules/ui.py") -os.system(f"sed -i -e 's/ queue=False, / /g' /home/user/app/stable-diffusion-webui/modules/ui.py") - -# ----------------------------Please duplicate this space and delete this block if you don't want to see the extra header---------------------------- -os.system(f"wget -q https://github.com/camenduru/webui/raw/main/header_patch.py -O /home/user/app/header_patch.py") -os.system(f"sed -i -e '/demo:/r /home/user/app/header_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py") -# --------------------------------------------------------------------------------------------------------------------------------------------------- - -if "IS_SHARED_UI" in os.environ: - os.system(f"rm -rfv /home/user/app/stable-diffusion-webui/scripts/") - - os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-config.json -O /home/user/app/shared-config.json") - os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-ui-config.json -O /home/user/app/shared-ui-config.json") - - os.system(f"wget -q https://huggingface.co/ckpt/anything-v3-vae-swapped/resolve/main/anything-v3-vae-swapped.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/anything-v3-vae-swapped.ckpt") - # os.system(f"wget -q {os.getenv('MODEL_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('MODEL_NAME')}") - # os.system(f"wget -q {os.getenv('VAE_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('VAE_NAME')}") - # os.system(f"wget -q {os.getenv('YAML_LINK')} -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/{os.getenv('YAML_NAME')}") - - os.system(f"python launch.py --force-enable-xformers --disable-console-progressbars --enable-console-prompts --ui-config-file /home/user/app/shared-ui-config.json --ui-settings-file /home/user/app/shared-config.json --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding") -else: - # Please duplicate this space and delete # character in front of the custom script you want to use or add here more custom scripts with same structure os.system(f"wget -q https://CUSTOM_SCRIPT_URL -O /home/user/app/stable-diffusion-webui/scripts/CUSTOM_SCRIPT_NAME.py") - os.system(f"wget -q https://gist.github.com/camenduru/9ec5f8141db9902e375967e93250860f/raw/d0bcf01786f20107c329c03f8968584ee67be12a/run_n_times.py -O /home/user/app/stable-diffusion-webui/scripts/run_n_times.py") - - # Please duplicate this space and delete # character in front of the extension you want to use or add here more extensions with same structure os.system(f"git clone https://EXTENSION_GIT_URL /home/user/app/stable-diffusion-webui/extensions/EXTENSION_NAME") - #os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui-artists-to-study /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-artists-to-study") - os.system(f"git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser") - os.system(f"git 
clone https://github.com/camenduru/deforum-for-automatic1111-webui /home/user/app/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui") - - # Please duplicate this space and delete # character in front of the model you want to use or add here more ckpts with same structure os.system(f"wget -q https://CKPT_URL -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/CKPT_NAME.ckpt") - #os.system(f"wget -q https://huggingface.co/nitrosocke/Arcane-Diffusion/resolve/main/arcane-diffusion-v3.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/arcane-diffusion-v3.ckpt") - #os.system(f"wget -q https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/Cyberpunk-Anime-Diffusion.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Cyberpunk-Anime-Diffusion.ckpt") - #os.system(f"wget -q https://huggingface.co/prompthero/midjourney-v4-diffusion/resolve/main/mdjrny-v4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/mdjrny-v4.ckpt") - #os.system(f"wget -q https://huggingface.co/nitrosocke/mo-di-diffusion/resolve/main/moDi-v1-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/moDi-v1-pruned.ckpt") - #os.system(f"wget -q https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/resolve/main/PaperCut_v1.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/PaperCut_v1.ckpt") - #os.system(f"wget -q https://huggingface.co/lilpotat/sa/resolve/main/samdoesarts_style.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/samdoesarts_style.ckpt") - #os.system(f"wget -q https://huggingface.co/hakurei/waifu-diffusion-v1-3/resolve/main/wd-v1-3-float32.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/wd-v1-3-float32.ckpt") - #os.system(f"wget -q https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-4.ckpt") - #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.ckpt") - #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-5-inpainting.ckpt") - - #os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.ckpt") - #os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0.vae.pt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.vae.pt") - - #os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2/resolve/main/768-v-ema.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.ckpt") - #os.system(f"wget -q https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.yaml") - - os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v2-1_768-ema-pruned.ckpt") - os.system(f"wget -q 
https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v2-1_768-ema-pruned.yaml") - - os.system(f"python launch.py --force-enable-xformers --ui-config-file /home/user/app/ui-config.json --ui-settings-file /home/user/app/config.json --disable-console-progressbars --enable-console-prompts --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding --api --skip-torch-cuda-test") - \ No newline at end of file diff --git a/spaces/adpro/dpt-depth07/README.md b/spaces/adpro/dpt-depth07/README.md deleted file mode 100644 index a2df32f52be298450622acdf691911580499139c..0000000000000000000000000000000000000000 --- a/spaces/adpro/dpt-depth07/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Dpt Depth Estimation -emoji: ⚡ -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 2.8.13 -app_file: app.py -pinned: false -duplicated_from: adpro/dpt-depth01 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/adpro/dpt-depth07/app.py b/spaces/adpro/dpt-depth07/app.py deleted file mode 100644 index d53cd25e9a32ed9f2b8c670cb4e9b6f00b05ec82..0000000000000000000000000000000000000000 --- a/spaces/adpro/dpt-depth07/app.py +++ /dev/null @@ -1,45 +0,0 @@ -import gradio as gr -from transformers import DPTFeatureExtractor, DPTForDepthEstimation -import torch -import numpy as np -from PIL import Image - -#torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg') - -feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large") -model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large") - -def process_image(image): - # prepare image for the model - encoding = feature_extractor(image, return_tensors="pt") - - # forward pass - with torch.no_grad(): - outputs = model(**encoding) - predicted_depth = outputs.predicted_depth - - # interpolate to original size - prediction = torch.nn.functional.interpolate( - predicted_depth.unsqueeze(1), - size=image.size[::-1], - mode="bicubic", - align_corners=False, - ).squeeze() - output = prediction.cpu().numpy() - formatted = (output * 255 / np.max(output)).astype('uint8') - img = Image.fromarray(formatted) - return img - - return result - -title = "Demo: zero-shot depth estimation with DPT" -description = "Demo for Intel's DPT, a Dense Prediction Transformer for state-of-the-art dense prediction tasks such as semantic segmentation and depth estimation." 
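# process_image() above runs the DPT pipeline: the feature extractor prepares pixel values,
# the model predicts a low-resolution depth map, torch.nn.functional.interpolate rescales it
# to the input resolution, and the result is normalised to 0-255 for display as a PIL image.
# A usage sketch outside of Gradio (the file name is illustrative only):
#
#   from PIL import Image
#   depth = process_image(Image.open("example.jpg").convert("RGB"))
#   depth.save("example_depth.png")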
- - -iface = gr.Interface(fn=process_image, - inputs=gr.inputs.Image(type="pil"), - outputs=gr.outputs.Image(type="pil", label="predicted depth"), - title=title, - description=description, - enable_queue=True) -iface.launch(debug=True) \ No newline at end of file diff --git a/spaces/akhaliq/Counterfeit-V2.0/README.md b/spaces/akhaliq/Counterfeit-V2.0/README.md deleted file mode 100644 index 2a184872edfa7e1a5ce56e8fab716dc856493be3..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Counterfeit-V2.0/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Counterfeit V2.0 -emoji: 💩 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/akhaliq/Detic/detic/data/custom_dataset_mapper.py b/spaces/akhaliq/Detic/detic/data/custom_dataset_mapper.py deleted file mode 100644 index c7727dded3f93f5eeafdcd72e257197e3fdc817b..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/Detic/detic/data/custom_dataset_mapper.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import copy -import logging -import numpy as np -from typing import List, Optional, Union -import torch -import pycocotools.mask as mask_util - -from detectron2.config import configurable - -from detectron2.data import detection_utils as utils -from detectron2.data.detection_utils import transform_keypoint_annotations -from detectron2.data import transforms as T -from detectron2.data.dataset_mapper import DatasetMapper -from detectron2.structures import Boxes, BoxMode, Instances -from detectron2.structures import Keypoints, PolygonMasks, BitMasks -from fvcore.transforms.transform import TransformList -from .custom_build_augmentation import build_custom_augmentation -from .tar_dataset import DiskTarDataset - -__all__ = ["CustomDatasetMapper"] - -class CustomDatasetMapper(DatasetMapper): - @configurable - def __init__(self, is_train: bool, - with_ann_type=False, - dataset_ann=[], - use_diff_bs_size=False, - dataset_augs=[], - is_debug=False, - use_tar_dataset=False, - tarfile_path='', - tar_index_dir='', - **kwargs): - """ - add image labels - """ - self.with_ann_type = with_ann_type - self.dataset_ann = dataset_ann - self.use_diff_bs_size = use_diff_bs_size - if self.use_diff_bs_size and is_train: - self.dataset_augs = [T.AugmentationList(x) for x in dataset_augs] - self.is_debug = is_debug - self.use_tar_dataset = use_tar_dataset - if self.use_tar_dataset: - print('Using tar dataset') - self.tar_dataset = DiskTarDataset(tarfile_path, tar_index_dir) - super().__init__(is_train, **kwargs) - - - @classmethod - def from_config(cls, cfg, is_train: bool = True): - ret = super().from_config(cfg, is_train) - ret.update({ - 'with_ann_type': cfg.WITH_IMAGE_LABELS, - 'dataset_ann': cfg.DATALOADER.DATASET_ANN, - 'use_diff_bs_size': cfg.DATALOADER.USE_DIFF_BS_SIZE, - 'is_debug': cfg.IS_DEBUG, - 'use_tar_dataset': cfg.DATALOADER.USE_TAR_DATASET, - 'tarfile_path': cfg.DATALOADER.TARFILE_PATH, - 'tar_index_dir': cfg.DATALOADER.TAR_INDEX_DIR, - }) - if ret['use_diff_bs_size'] and is_train: - if cfg.INPUT.CUSTOM_AUG == 'EfficientDetResizeCrop': - dataset_scales = cfg.DATALOADER.DATASET_INPUT_SCALE - dataset_sizes = cfg.DATALOADER.DATASET_INPUT_SIZE - ret['dataset_augs'] = [ - build_custom_augmentation(cfg, True, scale, size) \ - for scale, size in zip(dataset_scales, dataset_sizes)] - else: - assert 
cfg.INPUT.CUSTOM_AUG == 'ResizeShortestEdge' - min_sizes = cfg.DATALOADER.DATASET_MIN_SIZES - max_sizes = cfg.DATALOADER.DATASET_MAX_SIZES - ret['dataset_augs'] = [ - build_custom_augmentation( - cfg, True, min_size=mi, max_size=ma) \ - for mi, ma in zip(min_sizes, max_sizes)] - else: - ret['dataset_augs'] = [] - - return ret - - def __call__(self, dataset_dict): - """ - include image labels - """ - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - # USER: Write your own image loading if it's not from a file - if 'file_name' in dataset_dict: - ori_image = utils.read_image( - dataset_dict["file_name"], format=self.image_format) - else: - ori_image, _, _ = self.tar_dataset[dataset_dict["tar_index"]] - ori_image = utils._apply_exif_orientation(ori_image) - ori_image = utils.convert_PIL_to_numpy(ori_image, self.image_format) - utils.check_image_size(dataset_dict, ori_image) - - # USER: Remove if you don't do semantic/panoptic segmentation. - if "sem_seg_file_name" in dataset_dict: - sem_seg_gt = utils.read_image( - dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2) - else: - sem_seg_gt = None - - if self.is_debug: - dataset_dict['dataset_source'] = 0 - - not_full_labeled = 'dataset_source' in dataset_dict and \ - self.with_ann_type and \ - self.dataset_ann[dataset_dict['dataset_source']] != 'box' - - aug_input = T.AugInput(copy.deepcopy(ori_image), sem_seg=sem_seg_gt) - if self.use_diff_bs_size and self.is_train: - transforms = \ - self.dataset_augs[dataset_dict['dataset_source']](aug_input) - else: - transforms = self.augmentations(aug_input) - image, sem_seg_gt = aug_input.image, aug_input.sem_seg - - image_shape = image.shape[:2] # h, w - dataset_dict["image"] = torch.as_tensor( - np.ascontiguousarray(image.transpose(2, 0, 1))) - - if sem_seg_gt is not None: - dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long")) - - # USER: Remove if you don't use pre-computed proposals. - # Most users would not need this feature. - if self.proposal_topk is not None: - utils.transform_proposals( - dataset_dict, image_shape, transforms, - proposal_topk=self.proposal_topk - ) - - if not self.is_train: - # USER: Modify this if you want to keep them for some reason. - dataset_dict.pop("annotations", None) - dataset_dict.pop("sem_seg_file_name", None) - return dataset_dict - - if "annotations" in dataset_dict: - # USER: Modify this if you want to keep them for some reason. 
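            # The block below drops unused segmentation/keypoint fields, applies the sampled
            # transforms to every annotation, keeps only non-crowd objects, converts them to an
            # Instances object (optionally recomputing boxes from masks), and finally filters
            # out empty instances before attaching them to dataset_dict["instances"].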
- for anno in dataset_dict["annotations"]: - if not self.use_instance_mask: - anno.pop("segmentation", None) - if not self.use_keypoint: - anno.pop("keypoints", None) - - # USER: Implement additional transformations if you have other types of data - all_annos = [ - (utils.transform_instance_annotations( - obj, transforms, image_shape, - keypoint_hflip_indices=self.keypoint_hflip_indices, - ), obj.get("iscrowd", 0)) - for obj in dataset_dict.pop("annotations") - ] - annos = [ann[0] for ann in all_annos if ann[1] == 0] - instances = utils.annotations_to_instances( - annos, image_shape, mask_format=self.instance_mask_format - ) - - del all_annos - if self.recompute_boxes: - instances.gt_boxes = instances.gt_masks.get_bounding_boxes() - dataset_dict["instances"] = utils.filter_empty_instances(instances) - if self.with_ann_type: - dataset_dict["pos_category_ids"] = dataset_dict.get( - 'pos_category_ids', []) - dataset_dict["ann_type"] = \ - self.dataset_ann[dataset_dict['dataset_source']] - if self.is_debug and (('pos_category_ids' not in dataset_dict) or \ - (dataset_dict['pos_category_ids'] == [])): - dataset_dict['pos_category_ids'] = [x for x in sorted(set( - dataset_dict['instances'].gt_classes.tolist() - ))] - return dataset_dict - -# DETR augmentation -def build_transform_gen(cfg, is_train): - """ - """ - if is_train: - min_size = cfg.INPUT.MIN_SIZE_TRAIN - max_size = cfg.INPUT.MAX_SIZE_TRAIN - sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING - else: - min_size = cfg.INPUT.MIN_SIZE_TEST - max_size = cfg.INPUT.MAX_SIZE_TEST - sample_style = "choice" - if sample_style == "range": - assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size)) - - logger = logging.getLogger(__name__) - tfm_gens = [] - if is_train: - tfm_gens.append(T.RandomFlip()) - tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style)) - if is_train: - logger.info("TransformGens used in training: " + str(tfm_gens)) - return tfm_gens - - -class DetrDatasetMapper: - """ - A callable which takes a dataset dict in Detectron2 Dataset format, - and map it into a format used by DETR. - The callable currently does the following: - 1. Read the image from "file_name" - 2. Applies geometric transforms to the image and annotation - 3. Find and applies suitable cropping to the image and annotation - 4. Prepare image and annotation to Tensors - """ - - def __init__(self, cfg, is_train=True): - if cfg.INPUT.CROP.ENABLED and is_train: - self.crop_gen = [ - T.ResizeShortestEdge([400, 500, 600], sample_style="choice"), - T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE), - ] - else: - self.crop_gen = None - - self.mask_on = cfg.MODEL.MASK_ON - self.tfm_gens = build_transform_gen(cfg, is_train) - logging.getLogger(__name__).info( - "Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen)) - ) - - self.img_format = cfg.INPUT.FORMAT - self.is_train = is_train - - def __call__(self, dataset_dict): - """ - Args: - dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. 
- Returns: - dict: a format that builtin models in detectron2 accept - """ - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - image = utils.read_image(dataset_dict["file_name"], format=self.img_format) - utils.check_image_size(dataset_dict, image) - - if self.crop_gen is None: - image, transforms = T.apply_transform_gens(self.tfm_gens, image) - else: - if np.random.rand() > 0.5: - image, transforms = T.apply_transform_gens(self.tfm_gens, image) - else: - image, transforms = T.apply_transform_gens( - self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image - ) - - image_shape = image.shape[:2] # h, w - - # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, - # but not efficient on large generic data structures due to the use of pickle & mp.Queue. - # Therefore it's important to use torch.Tensor. - dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) - - if not self.is_train: - # USER: Modify this if you want to keep them for some reason. - dataset_dict.pop("annotations", None) - return dataset_dict - - if "annotations" in dataset_dict: - # USER: Modify this if you want to keep them for some reason. - for anno in dataset_dict["annotations"]: - if not self.mask_on: - anno.pop("segmentation", None) - anno.pop("keypoints", None) - - # USER: Implement additional transformations if you have other types of data - annos = [ - utils.transform_instance_annotations(obj, transforms, image_shape) - for obj in dataset_dict.pop("annotations") - if obj.get("iscrowd", 0) == 0 - ] - instances = utils.annotations_to_instances(annos, image_shape) - dataset_dict["instances"] = utils.filter_empty_instances(instances) - return dataset_dict \ No newline at end of file diff --git a/spaces/ali-ghamdan/deoldify/fastai/layers.py b/spaces/ali-ghamdan/deoldify/fastai/layers.py deleted file mode 100644 index ef6a6399f570231ac860afdffc6ba4c257eb389b..0000000000000000000000000000000000000000 --- a/spaces/ali-ghamdan/deoldify/fastai/layers.py +++ /dev/null @@ -1,306 +0,0 @@ -"`fastai.layers` provides essential functions to building and modifying `model` architectures" -from .torch_core import * - -__all__ = ['AdaptiveConcatPool2d', 'BCEWithLogitsFlat', 'BCEFlat', 'MSELossFlat', 'CrossEntropyFlat', 'Debugger', - 'Flatten', 'Lambda', 'PoolFlatten', 'View', 'ResizeBatch', 'bn_drop_lin', 'conv2d', 'conv2d_trans', 'conv_layer', - 'embedding', 'simple_cnn', 'NormType', 'relu', 'batchnorm_2d', 'trunc_normal_', 'PixelShuffle_ICNR', 'icnr', - 'NoopLoss', 'WassersteinLoss', 'SelfAttention', 'SequentialEx', 'MergeLayer', 'res_block', 'sigmoid_range', - 'SigmoidRange', 'PartialLayer', 'FlattenedLoss', 'BatchNorm1dFlat', 'LabelSmoothingCrossEntropy', 'PooledSelfAttention2d'] - -class Lambda(Module): - "Create a layer that simply calls `func` with `x`" - def __init__(self, func:LambdaFunc): self.func=func - def forward(self, x): return self.func(x) - -class View(Module): - "Reshape `x` to `size`" - def __init__(self, *size:int): self.size = size - def forward(self, x): return x.view(self.size) - -class ResizeBatch(Module): - "Reshape `x` to `size`, keeping batch dim the same size" - def __init__(self, *size:int): self.size = size - def forward(self, x): return x.view((x.size(0),) + self.size) - -class Flatten(Module): - "Flatten `x` to a single dimension, often used at the end of a model. 
`full` for rank-1 tensor" - def __init__(self, full:bool=False): self.full = full - def forward(self, x): return x.view(-1) if self.full else x.view(x.size(0), -1) - -def PoolFlatten()->nn.Sequential: - "Apply `nn.AdaptiveAvgPool2d` to `x` and then flatten the result." - return nn.Sequential(nn.AdaptiveAvgPool2d(1), Flatten()) - -NormType = Enum('NormType', 'Batch BatchZero Weight Spectral Group Instance SpectralGN') - -def batchnorm_2d(nf:int, norm_type:NormType=NormType.Batch): - "A batchnorm2d layer with `nf` features initialized depending on `norm_type`." - bn = nn.BatchNorm2d(nf) - with torch.no_grad(): - bn.bias.fill_(1e-3) - bn.weight.fill_(0. if norm_type==NormType.BatchZero else 1.) - return bn - -def bn_drop_lin(n_in:int, n_out:int, bn:bool=True, p:float=0., actn:Optional[nn.Module]=None): - "Sequence of batchnorm (if `bn`), dropout (with `p`) and linear (`n_in`,`n_out`) layers followed by `actn`." - layers = [nn.BatchNorm1d(n_in)] if bn else [] - if p != 0: layers.append(nn.Dropout(p)) - layers.append(nn.Linear(n_in, n_out)) - if actn is not None: layers.append(actn) - return layers - -def conv1d(ni:int, no:int, ks:int=1, stride:int=1, padding:int=0, bias:bool=False): - "Create and initialize a `nn.Conv1d` layer with spectral normalization." - conv = nn.Conv1d(ni, no, ks, stride=stride, padding=padding, bias=bias) - nn.init.kaiming_normal_(conv.weight) - if bias: conv.bias.data.zero_() - return spectral_norm(conv) - -class PooledSelfAttention2d(Module): - "Pooled self attention layer for 2d." - def __init__(self, n_channels:int): - self.n_channels = n_channels - self.theta = spectral_norm(conv2d(n_channels, n_channels//8, 1)) # query - self.phi = spectral_norm(conv2d(n_channels, n_channels//8, 1)) # key - self.g = spectral_norm(conv2d(n_channels, n_channels//2, 1)) # value - self.o = spectral_norm(conv2d(n_channels//2, n_channels, 1)) - self.gamma = nn.Parameter(tensor([0.])) - - def forward(self, x): - # code borrowed from https://github.com/ajbrock/BigGAN-PyTorch/blob/7b65e82d058bfe035fc4e299f322a1f83993e04c/layers.py#L156 - theta = self.theta(x) - phi = F.max_pool2d(self.phi(x), [2,2]) - g = F.max_pool2d(self.g(x), [2,2]) - theta = theta.view(-1, self.n_channels // 8, x.shape[2] * x.shape[3]) - phi = phi.view(-1, self.n_channels // 8, x.shape[2] * x.shape[3] // 4) - g = g.view(-1, self.n_channels // 2, x.shape[2] * x.shape[3] // 4) - beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1) - o = self.o(torch.bmm(g, beta.transpose(1,2)).view(-1, self.n_channels // 2, x.shape[2], x.shape[3])) - return self.gamma * o + x - -class SelfAttention(Module): - "Self attention layer for nd." - def __init__(self, n_channels:int): - self.query = conv1d(n_channels, n_channels//8) - self.key = conv1d(n_channels, n_channels//8) - self.value = conv1d(n_channels, n_channels) - self.gamma = nn.Parameter(tensor([0.])) - - def forward(self, x): - #Notation from https://arxiv.org/pdf/1805.08318.pdf - size = x.size() - x = x.view(*size[:2],-1) - f,g,h = self.query(x),self.key(x),self.value(x) - beta = F.softmax(torch.bmm(f.permute(0,2,1).contiguous(), g), dim=1) - o = self.gamma * torch.bmm(h, beta) + x - return o.view(*size).contiguous() - -def conv2d(ni:int, nf:int, ks:int=3, stride:int=1, padding:int=None, bias=False, init:LayerFunc=nn.init.kaiming_normal_) -> nn.Conv2d: - "Create and initialize `nn.Conv2d` layer. `padding` defaults to `ks//2`." 
- if padding is None: padding = ks//2 - return init_default(nn.Conv2d(ni, nf, kernel_size=ks, stride=stride, padding=padding, bias=bias), init) - -def conv2d_trans(ni:int, nf:int, ks:int=2, stride:int=2, padding:int=0, bias=False) -> nn.ConvTranspose2d: - "Create `nn.ConvTranspose2d` layer." - return nn.ConvTranspose2d(ni, nf, kernel_size=ks, stride=stride, padding=padding, bias=bias) - -def relu(inplace:bool=False, leaky:float=None): - "Return a relu activation, maybe `leaky` and `inplace`." - return nn.LeakyReLU(inplace=inplace, negative_slope=leaky) if leaky is not None else nn.ReLU(inplace=inplace) - -def conv_layer(ni:int, nf:int, ks:int=3, stride:int=1, padding:int=None, bias:bool=None, is_1d:bool=False, - norm_type:Optional[NormType]=NormType.Batch, use_activ:bool=True, leaky:float=None, - transpose:bool=False, init:Callable=nn.init.kaiming_normal_, self_attention:bool=False): - "Create a sequence of convolutional (`ni` to `nf`), ReLU (if `use_activ`) and batchnorm (if `bn`) layers." - if padding is None: padding = (ks-1)//2 if not transpose else 0 - bn = norm_type in (NormType.Batch, NormType.BatchZero) - if bias is None: bias = not bn - conv_func = nn.ConvTranspose2d if transpose else nn.Conv1d if is_1d else nn.Conv2d - conv = init_default(conv_func(ni, nf, kernel_size=ks, bias=bias, stride=stride, padding=padding), init) - if norm_type==NormType.Weight: conv = weight_norm(conv) - elif norm_type==NormType.Spectral: conv = spectral_norm(conv) - layers = [conv] - if use_activ: layers.append(relu(True, leaky=leaky)) - if bn: layers.append((nn.BatchNorm1d if is_1d else nn.BatchNorm2d)(nf)) - if self_attention: layers.append(SelfAttention(nf)) - return nn.Sequential(*layers) - -class SequentialEx(Module): - "Like `nn.Sequential`, but with ModuleList semantics, and can access module input" - def __init__(self, *layers): self.layers = nn.ModuleList(layers) - - def forward(self, x): - res = x - for l in self.layers: - res.orig = x - nres = l(res) - #print(l. + ' mean: ' + str(nres.abs().mean())) - #print(' max: ' + str(nres.abs().max())) - # We have to remove res.orig to avoid hanging refs and therefore memory leaks - res.orig = None - res = nres - return res - - def __getitem__(self,i): return self.layers[i] - def append(self,l): return self.layers.append(l) - def extend(self,l): return self.layers.extend(l) - def insert(self,i,l): return self.layers.insert(i,l) - -class MergeLayer(Module): - "Merge a shortcut with the result of the module by adding them or concatenating thme if `dense=True`." - def __init__(self, dense:bool=False): self.dense=dense - def forward(self, x): return torch.cat([x,x.orig], dim=1) if self.dense else (x+x.orig) - -def res_block(nf, dense:bool=False, norm_type:Optional[NormType]=NormType.Batch, bottle:bool=False, **conv_kwargs): - "Resnet block of `nf` features. `conv_kwargs` are passed to `conv_layer`." 
- norm2 = norm_type - if not dense and (norm_type==NormType.Batch): norm2 = NormType.BatchZero - nf_inner = nf//2 if bottle else nf - return SequentialEx(conv_layer(nf, nf_inner, norm_type=norm_type, **conv_kwargs), - conv_layer(nf_inner, nf, norm_type=norm2, **conv_kwargs), - MergeLayer(dense)) - -def sigmoid_range(x:Tensor, low:int, high:int): - "Sigmoid function with range `(low, high)`" - return torch.sigmoid(x) * (high - low) + low - -class SigmoidRange(Module): - "Sigmoid module with range `(low,x_max)`" - def __init__(self, low:int, high:int): self.low,self.high = low,high - def forward(self, x): return sigmoid_range(x, self.low, self.high) - -class PartialLayer(Module): - "Layer that applies `partial(func, **kwargs)`." - def __init__(self, func, **kwargs): self.repr,self.func = f'{func}({kwargs})', partial(func, **kwargs) - def forward(self, x): return self.func(x) - def __repr__(self): return self.repr - -class AdaptiveConcatPool2d(Module): - "Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`." - def __init__(self, sz:Optional[int]=None): - "Output will be 2*sz or 2 if sz is None" - self.output_size = sz or 1 - self.ap = nn.AdaptiveAvgPool2d(self.output_size) - self.mp = nn.AdaptiveMaxPool2d(self.output_size) - - def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1) - -class Debugger(Module): - "A module to debug inside a model." - def forward(self,x:Tensor) -> Tensor: - set_trace() - return x - -def icnr(x, scale=2, init=nn.init.kaiming_normal_): - "ICNR init of `x`, with `scale` and `init` function." - ni,nf,h,w = x.shape - ni2 = int(ni/(scale**2)) - k = init(torch.zeros([ni2,nf,h,w])).transpose(0, 1) - k = k.contiguous().view(ni2, nf, -1) - k = k.repeat(1, 1, scale**2) - k = k.contiguous().view([nf,ni,h,w]).transpose(0, 1) - x.data.copy_(k) - -class PixelShuffle_ICNR(Module): - "Upsample by `scale` from `ni` filters to `nf` (default `ni`), using `nn.PixelShuffle`, `icnr` init, and `weight_norm`." - def __init__(self, ni:int, nf:int=None, scale:int=2, blur:bool=False, norm_type=NormType.Weight, leaky:float=None): - nf = ifnone(nf, ni) - self.conv = conv_layer(ni, nf*(scale**2), ks=1, norm_type=norm_type, use_activ=False) - icnr(self.conv[0].weight) - self.shuf = nn.PixelShuffle(scale) - # Blurring over (h*w) kernel - # "Super-Resolution using Convolutional Neural Networks without Any Checkerboard Artifacts" - # - https://arxiv.org/abs/1806.02658 - self.pad = nn.ReplicationPad2d((1,0,1,0)) - self.blur = nn.AvgPool2d(2, stride=1) - self.relu = relu(True, leaky=leaky) - - def forward(self,x): - x = self.shuf(self.relu(self.conv(x))) - return self.blur(self.pad(x)) if self.blur else x - -class FlattenedLoss(): - "Same as `func`, but flattens input and target." 
- def __init__(self, func, *args, axis:int=-1, floatify:bool=False, is_2d:bool=True, **kwargs): - self.func,self.axis,self.floatify,self.is_2d = func(*args,**kwargs),axis,floatify,is_2d - functools.update_wrapper(self, self.func) - - def __repr__(self): return f"FlattenedLoss of {self.func}" - @property - def reduction(self): return self.func.reduction - @reduction.setter - def reduction(self, v): self.func.reduction = v - - def __call__(self, input:Tensor, target:Tensor, **kwargs)->Rank0Tensor: - input = input.transpose(self.axis,-1).contiguous() - target = target.transpose(self.axis,-1).contiguous() - if self.floatify: target = target.float() - input = input.view(-1,input.shape[-1]) if self.is_2d else input.view(-1) - return self.func.__call__(input, target.view(-1), **kwargs) - -def CrossEntropyFlat(*args, axis:int=-1, **kwargs): - "Same as `nn.CrossEntropyLoss`, but flattens input and target." - return FlattenedLoss(nn.CrossEntropyLoss, *args, axis=axis, **kwargs) - -def BCEWithLogitsFlat(*args, axis:int=-1, floatify:bool=True, **kwargs): - "Same as `nn.BCEWithLogitsLoss`, but flattens input and target." - return FlattenedLoss(nn.BCEWithLogitsLoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) - -def BCEFlat(*args, axis:int=-1, floatify:bool=True, **kwargs): - "Same as `nn.BCELoss`, but flattens input and target." - return FlattenedLoss(nn.BCELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) - -def MSELossFlat(*args, axis:int=-1, floatify:bool=True, **kwargs): - "Same as `nn.MSELoss`, but flattens input and target." - return FlattenedLoss(nn.MSELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) - -class NoopLoss(Module): - "Just returns the mean of the `output`." - def forward(self, output, *args): return output.mean() - -class WassersteinLoss(Module): - "For WGAN." - def forward(self, real, fake): return real.mean() - fake.mean() - -def simple_cnn(actns:Collection[int], kernel_szs:Collection[int]=None, - strides:Collection[int]=None, bn=False) -> nn.Sequential: - "CNN with `conv_layer` defined by `actns`, `kernel_szs` and `strides`, plus batchnorm if `bn`." - nl = len(actns)-1 - kernel_szs = ifnone(kernel_szs, [3]*nl) - strides = ifnone(strides , [2]*nl) - layers = [conv_layer(actns[i], actns[i+1], kernel_szs[i], stride=strides[i], - norm_type=(NormType.Batch if bn and i<(len(strides)-1) else None)) for i in range_of(strides)] - layers.append(PoolFlatten()) - return nn.Sequential(*layers) - -def trunc_normal_(x:Tensor, mean:float=0., std:float=1.) -> Tensor: - "Truncated normal initialization." - # From https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/12 - return x.normal_().fmod_(2).mul_(std).add_(mean) - -def embedding(ni:int,nf:int) -> nn.Module: - "Create an embedding layer." 
- emb = nn.Embedding(ni, nf) - # See https://arxiv.org/abs/1711.09160 - with torch.no_grad(): trunc_normal_(emb.weight, std=0.01) - return emb - -class BatchNorm1dFlat(nn.BatchNorm1d): - "`nn.BatchNorm1d`, but first flattens leading dimensions" - def forward(self, x): - if x.dim()==2: return super().forward(x) - *f,l = x.shape - x = x.contiguous().view(-1,l) - return super().forward(x).view(*f,l) - -class LabelSmoothingCrossEntropy(Module): - def __init__(self, eps:float=0.1, reduction='mean'): self.eps,self.reduction = eps,reduction - - def forward(self, output, target): - c = output.size()[-1] - log_preds = F.log_softmax(output, dim=-1) - if self.reduction=='sum': loss = -log_preds.sum() - else: - loss = -log_preds.sum(dim=-1) - if self.reduction=='mean': loss = loss.mean() - return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target, reduction=self.reduction) diff --git a/spaces/ali-ghamdan/realesrgan-models/CODE_OF_CONDUCT.md b/spaces/ali-ghamdan/realesrgan-models/CODE_OF_CONDUCT.md deleted file mode 100644 index e8cc4daa4345590464314889b187d6a2d7a8e20f..0000000000000000000000000000000000000000 --- a/spaces/ali-ghamdan/realesrgan-models/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,128 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -We as members, contributors, and leaders pledge to make participation in our -community a harassment-free experience for everyone, regardless of age, body -size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, -nationality, personal appearance, race, religion, or sexual identity -and orientation. - -We pledge to act and interact in ways that contribute to an open, welcoming, -diverse, inclusive, and healthy community. - -## Our Standards - -Examples of behavior that contributes to a positive environment for our -community include: - -* Demonstrating empathy and kindness toward other people -* Being respectful of differing opinions, viewpoints, and experiences -* Giving and gracefully accepting constructive feedback -* Accepting responsibility and apologizing to those affected by our mistakes, - and learning from the experience -* Focusing on what is best not just for us as individuals, but for the - overall community - -Examples of unacceptable behavior include: - -* The use of sexualized language or imagery, and sexual attention or - advances of any kind -* Trolling, insulting or derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or email - address, without their explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Enforcement Responsibilities - -Community leaders are responsible for clarifying and enforcing our standards of -acceptable behavior and will take appropriate and fair corrective action in -response to any behavior that they deem inappropriate, threatening, offensive, -or harmful. - -Community leaders have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are -not aligned to this Code of Conduct, and will communicate reasons for moderation -decisions when appropriate. - -## Scope - -This Code of Conduct applies within all community spaces, and also applies when -an individual is officially representing the community in public spaces. 
-Examples of representing our community include using an official e-mail address, -posting via an official social media account, or acting as an appointed -representative at an online or offline event. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement at -xintao.wang@outlook.com or xintaowang@tencent.com. -All complaints will be reviewed and investigated promptly and fairly. - -All community leaders are obligated to respect the privacy and security of the -reporter of any incident. - -## Enforcement Guidelines - -Community leaders will follow these Community Impact Guidelines in determining -the consequences for any action they deem in violation of this Code of Conduct: - -### 1. Correction - -**Community Impact**: Use of inappropriate language or other behavior deemed -unprofessional or unwelcome in the community. - -**Consequence**: A private, written warning from community leaders, providing -clarity around the nature of the violation and an explanation of why the -behavior was inappropriate. A public apology may be requested. - -### 2. Warning - -**Community Impact**: A violation through a single incident or series -of actions. - -**Consequence**: A warning with consequences for continued behavior. No -interaction with the people involved, including unsolicited interaction with -those enforcing the Code of Conduct, for a specified period of time. This -includes avoiding interactions in community spaces as well as external channels -like social media. Violating these terms may lead to a temporary or -permanent ban. - -### 3. Temporary Ban - -**Community Impact**: A serious violation of community standards, including -sustained inappropriate behavior. - -**Consequence**: A temporary ban from any sort of interaction or public -communication with the community for a specified period of time. No public or -private interaction with the people involved, including unsolicited interaction -with those enforcing the Code of Conduct, is allowed during this period. -Violating these terms may lead to a permanent ban. - -### 4. Permanent Ban - -**Community Impact**: Demonstrating a pattern of violation of community -standards, including sustained inappropriate behavior, harassment of an -individual, or aggression toward or disparagement of classes of individuals. - -**Consequence**: A permanent ban from any sort of public interaction within -the community. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 2.0, available at -https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. - -Community Impact Guidelines were inspired by [Mozilla's code of conduct -enforcement ladder](https://github.com/mozilla/diversity). - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see the FAQ at -https://www.contributor-covenant.org/faq. Translations are available at -https://www.contributor-covenant.org/translations. 
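The fastai `layers.py` module deleted above defines small building blocks such as `conv_layer`, `simple_cnn`, and the `FlattenedLoss` wrappers (`CrossEntropyFlat` and friends). As a minimal sketch of how those helpers compose, assuming a fastai v1 install (or the vendored copy) is importable and using made-up shapes purely for illustration, not the DeOldify app's own wiring:

```python
import torch
# simple_cnn and CrossEntropyFlat are defined in the layers.py shown above;
# this assumes fastai v1 (or the vendored copy) is on the import path.
from fastai.layers import simple_cnn, CrossEntropyFlat

# A tiny conv net: 3 -> 16 -> 16 -> 2 channels, stride-2 conv_layers, ending in PoolFlatten.
model = simple_cnn((3, 16, 16, 2))

# FlattenedLoss wrapper around nn.CrossEntropyLoss: flattens input and target before the call.
loss_func = CrossEntropyFlat()

x = torch.randn(4, 3, 32, 32)   # hypothetical batch of four 3x32x32 images
y = torch.randint(0, 2, (4,))   # hypothetical class labels
loss = loss_func(model(x), y)   # scalar loss, ready for loss.backward()
```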
diff --git a/spaces/aliabd/SummerTime/model/single_doc/base_single_doc_model.py b/spaces/aliabd/SummerTime/model/single_doc/base_single_doc_model.py deleted file mode 100644 index 079700afaa3a270bf2424a0bb75a71cccc861a10..0000000000000000000000000000000000000000 --- a/spaces/aliabd/SummerTime/model/single_doc/base_single_doc_model.py +++ /dev/null @@ -1,36 +0,0 @@ -from model.base_model import SummModel - - -class SingleDocSummModel(SummModel): - def __init__( - self, - trained_domain: str = None, - max_input_length: int = None, - max_output_length: int = None, - ): - super(SingleDocSummModel, self).__init__( - trained_domain=trained_domain, - max_input_length=max_input_length, - max_output_length=max_output_length, - ) - - @classmethod - def assert_summ_input_type(cls, corpus, query): - if not isinstance(corpus, list): - raise TypeError( - "Single-document summarization requires corpus of `List[str]`." - ) - if not all([isinstance(ins, str) for ins in corpus]): - raise TypeError( - "Single-document summarization requires corpus of `List[str]`." - ) - - if query is not None: - if not isinstance(query, list): - raise TypeError( - "Query-based single-document summarization requires query of `List[str]`." - ) - if not all([isinstance(q, str) for q in query]): - raise TypeError( - "Query-based single-document summarization requires query of `List[str]`." - ) diff --git a/spaces/aliabd/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/infinibatch/__init__.py b/spaces/aliabd/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/infinibatch/__init__.py deleted file mode 100644 index d0539435729f8df6f6e98a3cd86d66627971ae58..0000000000000000000000000000000000000000 --- a/spaces/aliabd/SummerTime/model/third_party/HMNet/DataLoader/infinibatch/infinibatch/__init__.py +++ /dev/null @@ -1,293 +0,0 @@ -""" -Infinibatch is a library of checkpointable iterators for randomized data loading of massive data sets in deep neural network training. - - -## Features - - * support for corpora much larger than fit into RAM - * hierarchical block+sentence-level randomization over the whole corpus, different randomization in each epoch - * only load the data that is needed - * very fast start-up time (does not need to read full corpus) - * only requires the most basic of data preparation (e.g. no indexing) - * for multi-GPU, only load what the respective GPU needs - * 100% accurate check-pointing, restore from checkpoint should not read all data up to the checkpoint - * support automatic bucketed batching with dynamic batch sizes - * pre-fetching thread - * composable, as to support for complex batching, e.g. negative samples from multiple documents - - -## Getting Started - -Infinibatch requires Python 3.5 and has no dependencies. -There is presently no pip package. -To install it, please copy this library into a subfolder in your project: -```bash -cd YOUR_PROJECT_FOLDER -git clone https://msasg.visualstudio.com/DefaultCollection/SDRG/_git/infinibatch -``` -or, better, as a submodule reference: -```bash -git submodule add https://msasg.visualstudio.com/DefaultCollection/SDRG/_git/infinibatch -``` -It is now located at `infinibatch/infinibatch`, e.g. the main import file is `infinibatch/infinibatch/__init__.py`. 
- -To import it, you need to add that folder to your `PYTHONPATH` variable externally, or to `sys.path` inside the code: -```python -import sys -sys.path.insert(0,'infinibatch') # note: relative paths are relative to your current dir, not to the python script -import infinibatch -``` - -## Tutorial - -This little tutorial walks you through the steps of preparing your data and consuming them from Python code as batches. - -### Infinibatch Basics: Iterators and Checkpointing - -Infinibatch provides [Python iterators](https://docs.python.org/3.5/glossary.html#term-iterator) -to read your data. -An iterator represents a stream of data that can be retrieved item by item, e.g. via a -`for` loop or repeatedly calling `next()` on it. - -Infinibatch is agnostic to the data type of the items, which is determined by a user-supplied file-read function. -In NLP applications, items would typically be tuples of text. In other applications, -they can be images or an audio file with a textual annotation. - -Infinibatch makes it easy to read your data in randomized order, and supports checkpointing, which allows you to restart training exactly where you left off. - -Randomization is done _on the fly_, which means that it is not necessary to read the entire data set into memory -to be shuffled. Infinibatch implements a hierarchical shuffling algorithm -that only holds a subset of the data in RAM at any point in time. - -Infinibatch iterators are _checkpointable_. -Checkpointing lets you retrieve the current position (the "checkpoint") in the data stream at any time, so that -later, you can "rewind" to that same position. -The sad reality is that long-running trainings occasionally crash. -To be able to continue a crashed training as if it had not crashed, -save your Infinibatch iterator's checkpoint to disk whenever you save an intermediate model during training. -To restart a crashed training, reset the iterator to the saved checkpoint. -The data reader will now yield the exact same data-item sequence it would have yielded without the crash. - -### Data Preparation - -Infinibatch has one requirement on your data organization: -To use your data with Infinibatch, it must be split into a large number of small chunks. -A chunk is the smallest unit of data that is loaded from disk into RAM. Infinibatch holds a random subset of chunks in memory -that it randomly draws samples from. - -Below we want to show how such a split can be created. An easy way to split your data into chunks is with the Linux `split` command. - -In this tutorial, our "corpus" consists of 6 lines of text, where each line is one data item. -To create that corpus, please run this command in a bash shell. It creates a 6-line text file named `corpus.txt`: -```bash -echo \\ -'Lorem ipsum dolor sit amet, -consectetur adipiscing elit, -sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. -Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. -Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. -The quick brown fox jumps over the lazy dog.' \\ -> corpus.txt -``` -Now let us split it into 3 chunks of 2 lines each. Each chunk is stored as a zipped text file. -We will create them inside a new subdirectory called `corpus_chunks`: -```bash -mkdir corpus_chunks -split --lines 2 --numeric-suffixes \\ - --filter 'gzip > corpus_chunks/$FILE.txt.gz' \\ - corpus.txt corpus. 
-``` -This will have created three files: `corpus_chunks/corpus.00.txt.gz`, `corpus_chunks/corpus.01.txt.gz`, and `corpus_chunks/corpus.02.txt.gz`. -To verify whether the data has been split as expected, you can use this command: -```bash -zcat corpus_chunks/corpus.*.txt.gz -``` - -Hint: For large corpora, we recommend replacing `gzip` by `pigz` (`apt-get install pigz`), which runs notably faster via multi-threading. - -### Reading Items in Random Order With Infinibatch - -We will first show the easiest way to read data with Infinibatch, using the helper function `chunked_dataset_iterator``()`. -This function will create an Infinibatch iterator that yields the content of your data in random order. -Please the following program: -```python -import sys, gzip, glob -sys.path.insert(0,'infinibatch') -from infinibatch import datasets as ds - -ds = ds.chunked_dataset_iterator( - chunk_refs = glob.glob('corpus_chunks/corpus.*.txt.gz'), - read_chunk_fn = lambda path: iter(gzip.decompress(open(path, "rb") \\ - .read()).decode(encoding='utf-8') \\ - .splitlines()), - buffer_size = 6, seed = 1) - -for i in range(10): - print(next(ds)) -``` -You should get output that contains the 6 example lines in randomized order: -```text -Lorem ipsum dolor sit amet, -consectetur adipiscing elit, -Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. -Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. -The quick brown fox jumps over the lazy dog. -sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. -consectetur adipiscing elit, -Lorem ipsum dolor sit amet, -The quick brown fox jumps over the lazy dog. -sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. -``` -Note: The `buffer_size` parameter determines how many sentences are read into memory at any given time, -to draw randomized items from. In real settings with corpora of hundreds of millions of text lines, -the `buffer_size` parameter should be set in the millions. -RAM usage and startup time will be proportional to the buffer size -(but much lower than having to load the entire corpus into RAM). - -### Reading Items of Different Lengths in Batches - -For deep learning, we want to group multiple items into batches. -For NLP tasks, items are often lines of text of varying length. -Infinibatch implements an algorithm that randomizes the input sequence and groups it into -batches of approximately the same length (aka _bucketing_). - -Infinibatch's `BucketedReadaheadBatchIterator` performs this task. -It implements an algorithm modeled after the [Marian toolkit](https://github.com/marian-nmt/marian) -that preloads a large number of randomized items (typically millions; in this example: 6), -sorts them and groups them into batches of similar length, and then yields -them, in turn, in randomized order. - -Here is an example. Note that the `BucketedReadaheadBatchIterator` accepts -the previous randomized sentence sequence iterator (`ds`) as the source of items to randomize over. -This is an example how one forms pipelines of iterators with Infinibatch -(a concept familiar from Python's own `itertools`). -Once an iterator is passed to another as its source, consider it owned by that other iterator, -it must no longer be accessed by the calling code. 
-```python -import sys, gzip, glob -sys.path.insert(0,'infinibatch') -from infinibatch import datasets as ds -from infinibatch import iterators as it - -ds = ds.chunked_dataset_iterator( - chunk_refs = glob.glob('corpus_chunks/corpus.*.txt.gz'), - read_chunk_fn = lambda path: iter(gzip.decompress(open(path, "rb") \\ - .read()).decode(encoding='utf-8') \\ - .splitlines()), - buffer_size = 6, seed = 1) - -bs = it.BucketedReadaheadBatchIterator( - source_iterator = ds, # note: this is the iterator from above - read_ahead = 6, - key = lambda line: len(line), - batch_size = 2, - seed = 1) - -for i in range(25): - print(next(bs)) -``` -This code should output something like this: -```python -['sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.', - 'The quick brown fox jumps over the lazy dog.'] -['consectetur adipiscing elit,', 'Lorem ipsum dolor sit amet,'] -['Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.', - 'Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.'] -``` -followed by different permutations of the same tuples. -As you can see, the sentences are in random order and grouped in batches of 2 of approximately the same length. -You may notice that there is no variation in how the items get grouped into batches--that -is an artifact of this example, and generally not the case in real use when the data size is much larger -than the batch size. - -In NLP, sentence length often varies considerably. As a result, using batches of a fixed number of lines, -as in the example above, will waste GPU RAM and cores. -This is because the number of lines is limited by the longest possible sequence; batches of shorter lines -would leave GPU cycles on the table. -Ideally, one would use batches that have as many lines as fit into GPU RAM, -given the number of tokens of the longest line in the batch. -To support variable batch sizes, Infinibatch allows to pass a function as the `batch_size` parameter. -That function will be given the longest item of a batch and should estimate how many items of at most this length can fit. - -In our example, we assume that batches can hold at most 150 tokens. -Please change the above code as follows: -```python - batch_size = lambda longest_line: 150 // len(longest_line), -``` -The output looks like this: -``` -['consectetur adipiscing elit,', 'Lorem ipsum dolor sit amet,'] -['Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.'] -['sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.', - 'The quick brown fox jumps over the lazy dog.'] -['Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.'] -``` -That shorter sentences got grouped, while longer did not because they would exceed the total of 150 characters. - -### Reading Batches Into Numpy Arrays - -Lastly, we will need to feed batches into our favorite deep-learning tool. -We will show how to convert the batches of text lines into padded `numpy` arrays. - -In a typical NLP application, text items would be tokenized, and then each token -would be represented by an index into a unit vocabulary. -For simplicity, in this example each character is its own token, -and each token's numeric unit index is just its ASCII code. -These sequences are then padded to equal length with -1, and converted into a `numpy` array. 
- -Please rerun the previous example, but first insert the following code before the final `for` loop. -This example uses an Infinibatch `MapIterator`, which applies a user-supplied function or -lambda to each item: -```python -import numpy as np -def collate(lines_batch): - # tokenize all lines in the batch and map to unit ids - ids_batch = [[ord(c) for c in line] for line in lines_batch] - # create a padded numpy array as wide as the longest line, - # where shorter sequences are padded with -1 - width = max(len(ids) for ids in ids_batch) - return np.array([ids + [-1] * (width-len(ids)) for ids in ids_batch]) - -bs = it.MapIterator( - source_iterator = bs, - transform = collate) -``` -This will output batches like this. Note that in batches with multiple sentences, -some entries are padded with `-1`. -```python -[[ 99 111 110 115 101 99 116 101 116 117 114 32 97 100 105 112 105 115 - 99 105 110 103 32 101 108 105 116 44] - [ 76 111 114 101 109 32 105 112 115 117 109 32 100 111 108 111 114 32 - 115 105 116 32 97 109 101 116 44 -1]] -[[ 85 116 32 101 110 105 109 32 97 100 32 109 105 110 105 109 32 118 - 101 110 105 97 109 44 32 113 117 105 115 32 110 111 115 116 114 117 - 100 32 101 120 101 114 99 105 116 97 116 105 111 110 32 117 108 108 - 97 109 99 111 32 108 97 98 111 114 105 115 32 110 105 115 105 32 - 117 116 32 97 108 105 113 117 105 112 32 101 120 32 101 97 32 99 - 111 109 109 111 100 111 32 99 111 110 115 101 113 117 97 116 46]] -[[115 101 100 32 100 111 32 101 105 117 115 109 111 100 32 116 101 109 - 112 111 114 32 105 110 99 105 100 105 100 117 110 116 32 117 116 32 - 108 97 98 111 114 101 32 101 116 32 100 111 108 111 114 101 32 109 - 97 103 110 97 32 97 108 105 113 117 97 46] - [ 84 104 101 32 113 117 105 99 107 32 98 114 111 119 110 32 102 111 - 120 32 106 117 109 112 115 32 111 118 101 114 32 116 104 101 32 108 - 97 122 121 32 100 111 103 46 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 - -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1]] -[[ 68 117 105 115 32 97 117 116 101 32 105 114 117 114 101 32 100 111 - 108 111 114 32 105 110 32 114 101 112 114 101 104 101 110 100 101 114 - 105 116 32 105 110 32 118 111 108 117 112 116 97 116 101 32 118 101 - 108 105 116 32 101 115 115 101 32 99 105 108 108 117 109 32 100 111 - 108 111 114 101 32 101 117 32 102 117 103 105 97 116 32 110 117 108 - 108 97 32 112 97 114 105 97 116 117 114 46]] -``` - -## Where To Go From Here - -The above tutorial showed you the use of the most common iterator type, as created by the -convenience function `chunked_dataset_iterator()`. - -Not all real-life scenarios are covered by this function. For example, multi-task learning -scenarios require more complex combinations of data. To create those, you will need -to compose the necessary data reader from the underlying building blocks. -This is described at the documentation of the module `iterators`. -""" diff --git a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/XMLDecl.pod b/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/XMLDecl.pod deleted file mode 100644 index f6e6a3a48a1fd8d961f356e89dc77adb782b02da..0000000000000000000000000000000000000000 --- a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/XMLDecl.pod +++ /dev/null @@ -1,33 +0,0 @@ -=head1 NAME - -XML::DOM::XMLDecl - XML declaration in XML::DOM - -=head1 DESCRIPTION - -XML::DOM::XMLDecl extends L, but is not part of the DOM Level 1 -specification. - -It contains the XML declaration, e.g. 
- - - -See also XML::DOM::Document::getXMLDecl. - -=head2 METHODS - -=over 4 - -=item getVersion and setVersion (version) - -Returns and sets the XML version. At the time of this writing the version should -always be "1.0" - -=item getEncoding and setEncoding (encoding) - -undef may be specified for the encoding value. - -=item getStandalone and setStandalone (standalone) - -undef may be specified for the standalone value. - -=back diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/src/os/unix/pa_unix_util.h b/spaces/amarchheda/ChordDuplicate/portaudio/src/os/unix/pa_unix_util.h deleted file mode 100644 index 2228cb331d924d6cfcd84824d61ca7bc7c129a99..0000000000000000000000000000000000000000 --- a/spaces/amarchheda/ChordDuplicate/portaudio/src/os/unix/pa_unix_util.h +++ /dev/null @@ -1,224 +0,0 @@ -/* - * $Id$ - * Portable Audio I/O Library - * UNIX platform-specific support functions - * - * Based on the Open Source API proposed by Ross Bencina - * Copyright (c) 1999-2000 Ross Bencina - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -/** @file - @ingroup unix_src -*/ - -#ifndef PA_UNIX_UTIL_H -#define PA_UNIX_UTIL_H - -#include "pa_cpuload.h" -#include -#include -#include - -#ifdef __cplusplus -extern "C" -{ -#endif /* __cplusplus */ - -#define PA_MIN(x,y) ( (x) < (y) ? (x) : (y) ) -#define PA_MAX(x,y) ( (x) > (y) ? 
(x) : (y) ) - -/* Utilize GCC branch prediction for error tests */ -#if defined __GNUC__ && __GNUC__ >= 3 -#define UNLIKELY(expr) __builtin_expect( (expr), 0 ) -#else -#define UNLIKELY(expr) (expr) -#endif - -#define STRINGIZE_HELPER(expr) #expr -#define STRINGIZE(expr) STRINGIZE_HELPER(expr) - -#define PA_UNLESS(expr, code) \ - do { \ - if( UNLIKELY( (expr) == 0 ) ) \ - { \ - PaUtil_DebugPrint(( "Expression '" #expr "' failed in '" __FILE__ "', line: " STRINGIZE( __LINE__ ) "\n" )); \ - result = (code); \ - goto error; \ - } \ - } while (0); - -static PaError paUtilErr_; /* Used with PA_ENSURE */ - -/* Check PaError */ -#define PA_ENSURE(expr) \ - do { \ - if( UNLIKELY( (paUtilErr_ = (expr)) < paNoError ) ) \ - { \ - PaUtil_DebugPrint(( "Expression '" #expr "' failed in '" __FILE__ "', line: " STRINGIZE( __LINE__ ) "\n" )); \ - result = paUtilErr_; \ - goto error; \ - } \ - } while (0); - -#define PA_ASSERT_CALL(expr, success) \ - paUtilErr_ = (expr); \ - assert( success == paUtilErr_ ); - -#define PA_ENSURE_SYSTEM(expr, success) \ - do { \ - if( UNLIKELY( (paUtilErr_ = (expr)) != success ) ) \ - { \ - /* PaUtil_SetLastHostErrorInfo should only be used in the main thread */ \ - if( pthread_equal(pthread_self(), paUnixMainThread) ) \ - { \ - PaUtil_SetLastHostErrorInfo( paALSA, paUtilErr_, strerror( paUtilErr_ ) ); \ - } \ - PaUtil_DebugPrint( "Expression '" #expr "' failed in '" __FILE__ "', line: " STRINGIZE( __LINE__ ) "\n" ); \ - result = paUnanticipatedHostError; \ - goto error; \ - } \ - } while( 0 ); - -typedef struct { - pthread_t callbackThread; -} PaUtilThreading; - -PaError PaUtil_InitializeThreading( PaUtilThreading *threading ); -void PaUtil_TerminateThreading( PaUtilThreading *threading ); -PaError PaUtil_StartThreading( PaUtilThreading *threading, void *(*threadRoutine)(void *), void *data ); -PaError PaUtil_CancelThreading( PaUtilThreading *threading, int wait, PaError *exitResult ); - -/* State accessed by utility functions */ - -/* -void PaUnix_SetRealtimeScheduling( int rt ); - -void PaUtil_InitializeThreading( PaUtilThreading *th, PaUtilCpuLoadMeasurer *clm ); - -PaError PaUtil_CreateCallbackThread( PaUtilThreading *th, void *(*CallbackThreadFunc)( void * ), PaStream *s ); - -PaError PaUtil_KillCallbackThread( PaUtilThreading *th, PaError *exitResult ); - -void PaUtil_CallbackUpdate( PaUtilThreading *th ); -*/ - -extern pthread_t paUnixMainThread; - -typedef struct -{ - pthread_mutex_t mtx; -} PaUnixMutex; - -PaError PaUnixMutex_Initialize( PaUnixMutex* self ); -PaError PaUnixMutex_Terminate( PaUnixMutex* self ); -PaError PaUnixMutex_Lock( PaUnixMutex* self ); -PaError PaUnixMutex_Unlock( PaUnixMutex* self ); - -typedef struct -{ - pthread_t thread; - int parentWaiting; - int stopRequested; - int locked; - PaUnixMutex mtx; - pthread_cond_t cond; - volatile sig_atomic_t stopRequest; -} PaUnixThread; - -/** Initialize global threading state. - */ -PaError PaUnixThreading_Initialize( void ); - -/** Perish, passing on eventual error code. - * - * A thin wrapper around pthread_exit, will automatically pass on any error code to the joining thread. - * If the result indicates an error, i.e. it is not equal to paNoError, this function will automatically - * allocate a pointer so the error is passed on with pthread_exit. If the result indicates that all is - * well however, only a NULL pointer will be handed to pthread_exit. Thus, the joining thread should - * check whether a non-NULL result pointer is obtained from pthread_join and make sure to free it. 
- * @param result: The error code to pass on to the joining thread. - */ -#define PaUnixThreading_EXIT(result) \ - do { \ - PaError* pres = NULL; \ - if( paNoError != (result) ) \ - { \ - pres = malloc( sizeof (PaError) ); \ - *pres = (result); \ - } \ - pthread_exit( pres ); \ - } while (0); - -/** Spawn a thread. - * - * Intended for spawning the callback thread from the main thread. This function can even block (for a certain - * time or indefinitely) until notified by the callback thread (using PaUnixThread_NotifyParent), which can be - * useful in order to make sure that callback has commenced before returning from Pa_StartStream. - * @param threadFunc: The function to be executed in the child thread. - * @param waitForChild: If not 0, wait for child thread to call PaUnixThread_NotifyParent. Less than 0 means - * wait for ever, greater than 0 wait for the specified time. - * @param rtSched: Enable realtime scheduling? - * @return: If timed out waiting on child, paTimedOut. - */ -PaError PaUnixThread_New( PaUnixThread* self, void* (*threadFunc)( void* ), void* threadArg, PaTime waitForChild, - int rtSched ); - -/** Terminate thread. - * - * @param wait: If true, request that background thread stop and wait until it does, else cancel it. - * @param exitResult: If non-null this will upon return contain the exit status of the thread. - */ -PaError PaUnixThread_Terminate( PaUnixThread* self, int wait, PaError* exitResult ); - -/** Prepare to notify waiting parent thread. - * - * An internal lock must be held before the parent is notified in PaUnixThread_NotifyParent, call this to - * acquire it beforehand. - * @return: If parent is not waiting, paInternalError. - */ -PaError PaUnixThread_PrepareNotify( PaUnixThread* self ); - -/** Notify waiting parent thread. - * - * @return: If parent timed out waiting, paTimedOut. If parent was never waiting, paInternalError. - */ -PaError PaUnixThread_NotifyParent( PaUnixThread* self ); - -/** Has the parent thread requested this thread to stop? 
- */ -int PaUnixThread_StopRequested( PaUnixThread* self ); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ -#endif diff --git a/spaces/anaclaudia13ct/insect_detection/utils/loggers/__init__.py b/spaces/anaclaudia13ct/insect_detection/utils/loggers/__init__.py deleted file mode 100644 index 22da87034f2415fde48acf2612bf3bac86baf239..0000000000000000000000000000000000000000 --- a/spaces/anaclaudia13ct/insect_detection/utils/loggers/__init__.py +++ /dev/null @@ -1,411 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Logging utils -""" - -import os -import warnings -from pathlib import Path - -import pkg_resources as pkg -import torch -from torch.utils.tensorboard import SummaryWriter - -from utils.general import LOGGER, colorstr, cv2 -from utils.loggers.clearml.clearml_utils import ClearmlLogger -from utils.loggers.wandb.wandb_utils import WandbLogger -from utils.plots import plot_images, plot_labels, plot_results -from utils.torch_utils import de_parallel - -LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, ClearML -RANK = int(os.getenv('RANK', -1)) - -try: - import wandb - - assert hasattr(wandb, '__version__') # verify package import not local dir - if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}: - try: - wandb_login_success = wandb.login(timeout=30) - except wandb.errors.UsageError: # known non-TTY terminal issue - wandb_login_success = False - if not wandb_login_success: - wandb = None -except (ImportError, AssertionError): - wandb = None - -try: - import clearml - - assert hasattr(clearml, '__version__') # verify package import not local dir -except (ImportError, AssertionError): - clearml = None - -try: - if RANK not in [0, -1]: - comet_ml = None - else: - import comet_ml - - assert hasattr(comet_ml, '__version__') # verify package import not local dir - from utils.loggers.comet import CometLogger - -except (ModuleNotFoundError, ImportError, AssertionError): - comet_ml = None - - -class Loggers(): - # YOLOv5 Loggers class - def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): - self.save_dir = save_dir - self.weights = weights - self.opt = opt - self.hyp = hyp - self.plots = not opt.noplots # plot results - self.logger = logger # for printing results to console - self.include = include - self.keys = [ - 'train/box_loss', - 'train/obj_loss', - 'train/cls_loss', # train loss - 'metrics/precision', - 'metrics/recall', - 'metrics/mAP_0.5', - 'metrics/mAP_0.5:0.95', # metrics - 'val/box_loss', - 'val/obj_loss', - 'val/cls_loss', # val loss - 'x/lr0', - 'x/lr1', - 'x/lr2'] # params - self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95'] - for k in LOGGERS: - setattr(self, k, None) # init empty logger dictionary - self.csv = True # always log to csv - - # Messages - # if not wandb: - # prefix = colorstr('Weights & Biases: ') - # s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs in Weights & Biases" - # self.logger.info(s) - if not clearml: - prefix = colorstr('ClearML: ') - s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" - self.logger.info(s) - if not comet_ml: - prefix = colorstr('Comet: ') - s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet" - self.logger.info(s) - # TensorBoard - s = self.save_dir - if 'tb' in self.include and not self.opt.evolve: 
- prefix = colorstr('TensorBoard: ') - self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/") - self.tb = SummaryWriter(str(s)) - - # W&B - if wandb and 'wandb' in self.include: - wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') - run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None - self.opt.hyp = self.hyp # add hyperparameters - self.wandb = WandbLogger(self.opt, run_id) - # temp warn. because nested artifacts not supported after 0.12.10 - # if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.11'): - # s = "YOLOv5 temporarily requires wandb version 0.12.10 or below. Some features may not work as expected." - # self.logger.warning(s) - else: - self.wandb = None - - # ClearML - if clearml and 'clearml' in self.include: - try: - self.clearml = ClearmlLogger(self.opt, self.hyp) - except Exception: - self.clearml = None - prefix = colorstr('ClearML: ') - LOGGER.warning(f'{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.' - f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme') - - else: - self.clearml = None - - # Comet - if comet_ml and 'comet' in self.include: - if isinstance(self.opt.resume, str) and self.opt.resume.startswith("comet://"): - run_id = self.opt.resume.split("/")[-1] - self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) - - else: - self.comet_logger = CometLogger(self.opt, self.hyp) - - else: - self.comet_logger = None - - @property - def remote_dataset(self): - # Get data_dict if custom dataset artifact link is provided - data_dict = None - if self.clearml: - data_dict = self.clearml.data_dict - if self.wandb: - data_dict = self.wandb.data_dict - if self.comet_logger: - data_dict = self.comet_logger.data_dict - - return data_dict - - def on_train_start(self): - if self.comet_logger: - self.comet_logger.on_train_start() - - def on_pretrain_routine_start(self): - if self.comet_logger: - self.comet_logger.on_pretrain_routine_start() - - def on_pretrain_routine_end(self, labels, names): - # Callback runs on pre-train routine end - if self.plots: - plot_labels(labels, names, self.save_dir) - paths = self.save_dir.glob('*labels*.jpg') # training labels - if self.wandb: - self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) - # if self.clearml: - # pass # ClearML saves these images automatically using hooks - if self.comet_logger: - self.comet_logger.on_pretrain_routine_end(paths) - - def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): - log_dict = dict(zip(self.keys[0:3], vals)) - # Callback runs on train batch end - # ni: number integrated batches (since train start) - if self.plots: - if ni < 3: - f = self.save_dir / f'train_batch{ni}.jpg' # filename - plot_images(imgs, targets, paths, f) - if ni == 0 and self.tb and not self.opt.sync_bn: - log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz)) - if ni == 10 and (self.wandb or self.clearml): - files = sorted(self.save_dir.glob('train*.jpg')) - if self.wandb: - self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) - if self.clearml: - self.clearml.log_debug_samples(files, title='Mosaics') - - if self.comet_logger: - self.comet_logger.on_train_batch_end(log_dict, step=ni) - - def on_train_epoch_end(self, epoch): - # Callback runs on train epoch end - if 
self.wandb: - self.wandb.current_epoch = epoch + 1 - - if self.comet_logger: - self.comet_logger.on_train_epoch_end(epoch) - - def on_val_start(self): - if self.comet_logger: - self.comet_logger.on_val_start() - - def on_val_image_end(self, pred, predn, path, names, im): - # Callback runs on val image end - if self.wandb: - self.wandb.val_one_image(pred, predn, path, names, im) - if self.clearml: - self.clearml.log_image_with_boxes(path, pred, names, im) - - def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out): - if self.comet_logger: - self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out) - - def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): - # Callback runs on val end - if self.wandb or self.clearml: - files = sorted(self.save_dir.glob('val*.jpg')) - if self.wandb: - self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) - if self.clearml: - self.clearml.log_debug_samples(files, title='Validation') - - if self.comet_logger: - self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) - - def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): - # Callback runs at the end of each fit (train+val) epoch - x = dict(zip(self.keys, vals)) - if self.csv: - file = self.save_dir / 'results.csv' - n = len(x) + 1 # number of cols - s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header - with open(file, 'a') as f: - f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') - - if self.tb: - for k, v in x.items(): - self.tb.add_scalar(k, v, epoch) - elif self.clearml: # log to ClearML if TensorBoard not used - for k, v in x.items(): - title, series = k.split('/') - self.clearml.task.get_logger().report_scalar(title, series, v, epoch) - - if self.wandb: - if best_fitness == fi: - best_results = [epoch] + vals[3:7] - for i, name in enumerate(self.best_keys): - self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary - self.wandb.log(x) - self.wandb.end_epoch(best_result=best_fitness == fi) - - if self.clearml: - self.clearml.current_epoch_logged_images = set() # reset epoch image limit - self.clearml.current_epoch += 1 - - if self.comet_logger: - self.comet_logger.on_fit_epoch_end(x, epoch=epoch) - - def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): - # Callback runs on model save event - if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1: - if self.wandb: - self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) - if self.clearml: - self.clearml.task.update_output_model(model_path=str(last), - model_name='Latest Model', - auto_delete_file=False) - - if self.comet_logger: - self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi) - - def on_train_end(self, last, best, epoch, results): - # Callback runs on training end, i.e. 
saving best model - if self.plots: - plot_results(file=self.save_dir / 'results.csv') # save results.png - files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] - files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter - self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}") - - if self.tb and not self.clearml: # These images are already captured by ClearML by now, we don't want doubles - for f in files: - self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') - - if self.wandb: - self.wandb.log(dict(zip(self.keys[3:10], results))) - self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) - # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model - if not self.opt.evolve: - wandb.log_artifact(str(best if best.exists() else last), - type='model', - name=f'run_{self.wandb.wandb_run.id}_model', - aliases=['latest', 'best', 'stripped']) - self.wandb.finish_run() - - if self.clearml and not self.opt.evolve: - self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), - name='Best Model', - auto_delete_file=False) - - if self.comet_logger: - final_results = dict(zip(self.keys[3:10], results)) - self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results) - - def on_params_update(self, params: dict): - # Update hyperparams or configs of the experiment - if self.wandb: - self.wandb.wandb_run.config.update(params, allow_val_change=True) - if self.comet_logger: - self.comet_logger.on_params_update(params) - - -class GenericLogger: - """ - YOLOv5 General purpose logger for non-task specific logging - Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) 
- Arguments - opt: Run arguments - console_logger: Console logger - include: loggers to include - """ - - def __init__(self, opt, console_logger, include=('tb', 'wandb')): - # init default loggers - self.save_dir = Path(opt.save_dir) - self.include = include - self.console_logger = console_logger - self.csv = self.save_dir / 'results.csv' # CSV logger - if 'tb' in self.include: - prefix = colorstr('TensorBoard: ') - self.console_logger.info( - f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/") - self.tb = SummaryWriter(str(self.save_dir)) - - if wandb and 'wandb' in self.include: - self.wandb = wandb.init(project=web_project_name(str(opt.project)), - name=None if opt.name == "exp" else opt.name, - config=opt) - else: - self.wandb = None - - def log_metrics(self, metrics, epoch): - # Log metrics dictionary to all loggers - if self.csv: - keys, vals = list(metrics.keys()), list(metrics.values()) - n = len(metrics) + 1 # number of cols - s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # header - with open(self.csv, 'a') as f: - f.write(s + ('%23.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') - - if self.tb: - for k, v in metrics.items(): - self.tb.add_scalar(k, v, epoch) - - if self.wandb: - self.wandb.log(metrics, step=epoch) - - def log_images(self, files, name='Images', epoch=0): - # Log images to all loggers - files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])] # to Path - files = [f for f in files if f.exists()] # filter by exists - - if self.tb: - for f in files: - self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') - - if self.wandb: - self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch) - - def log_graph(self, model, imgsz=(640, 640)): - # Log model graph to all loggers - if self.tb: - log_tensorboard_graph(self.tb, model, imgsz) - - def log_model(self, model_path, epoch=0, metadata={}): - # Log model to all loggers - if self.wandb: - art = wandb.Artifact(name=f"run_{wandb.run.id}_model", type="model", metadata=metadata) - art.add_file(str(model_path)) - wandb.log_artifact(art) - - def update_params(self, params): - # Update the paramters logged - if self.wandb: - wandb.run.config.update(params, allow_val_change=True) - - -def log_tensorboard_graph(tb, model, imgsz=(640, 640)): - # Log model graph to TensorBoard - try: - p = next(model.parameters()) # for device, type - imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand - im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image (WARNING: must be zeros, not empty) - with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress jit trace warning - tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) - except Exception as e: - LOGGER.warning(f'WARNING ⚠️ TensorBoard graph visualization failure {e}') - - -def web_project_name(project): - # Convert local project name to web project name - if not project.startswith('runs/train'): - return project - suffix = '-Classify' if project.endswith('-cls') else '-Segment' if project.endswith('-seg') else '' - return f'YOLOv5{suffix}' diff --git a/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/spec_gen.py b/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/spec_gen.py deleted file mode 100644 index dee24de568736b65670a9058aa6331b46b113228..0000000000000000000000000000000000000000 --- a/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/spec_gen.py +++ /dev/null @@ -1,22 
+0,0 @@ -from data_utils import TextAudioSpeakerLoader, EvalDataLoader -import json -from tqdm import tqdm - -from utils import HParams - -config_path = 'configs/config.json' -with open(config_path, "r") as f: - data = f.read() -config = json.loads(data) -hps = HParams(**config) - -train_dataset = TextAudioSpeakerLoader("filelists/train.txt", hps) -test_dataset = TextAudioSpeakerLoader("filelists/test.txt", hps) -eval_dataset = TextAudioSpeakerLoader("filelists/val.txt", hps) - -for _ in tqdm(train_dataset): - pass -for _ in tqdm(eval_dataset): - pass -for _ in tqdm(test_dataset): - pass diff --git a/spaces/arijitdas123student/meeting-summarizer/app.py b/spaces/arijitdas123student/meeting-summarizer/app.py deleted file mode 100644 index 78bdb11da8f161a8c46d14ac39171a28b940e81a..0000000000000000000000000000000000000000 --- a/spaces/arijitdas123student/meeting-summarizer/app.py +++ /dev/null @@ -1,11 +0,0 @@ -# For this demo we're using Gradio, Hugging Face Spaces, Pytorch and Hugging Face Transformers -import gradio as gr -from gradio.mix import Parallel, Series - -# Summarizes Meeting Transcripts using Google Research's PEGASUS library -summarizer = gr.Interface.load("huggingface/google/pegasus-cnn_dailymail") -output_text = gr.outputs.Textbox() - -# Displays the end results to a webpage (i.e. here HuggingFace Spaces) - -Series(summarizer, inputs = gr.inputs.Textbox(lines=10, label="Meeting Transcript")).launch() \ No newline at end of file diff --git a/spaces/arju10/traditional_cloth_recognizer/README.md b/spaces/arju10/traditional_cloth_recognizer/README.md deleted file mode 100644 index b6733ca82f6cf16083e3a0844661c6bfba8f3085..0000000000000000000000000000000000000000 --- a/spaces/arju10/traditional_cloth_recognizer/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Traditional Cloth Recognizer -emoji: 🏆 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -HuggingFace Spaces App URL: https://huggingface.co/spaces/msideadman/cap-recognizer - -Gradio App URL: https://897f24d5bf9ba00855.gradio.live diff --git a/spaces/arnavkartikeya/SCRIPture-final/models/med.py b/spaces/arnavkartikeya/SCRIPture-final/models/med.py deleted file mode 100644 index 7b00a35450b736180a805d4f4664b4fb95aeba01..0000000000000000000000000000000000000000 --- a/spaces/arnavkartikeya/SCRIPture-final/models/med.py +++ /dev/null @@ -1,955 +0,0 @@ -''' - * Copyright (c) 2022, salesforce.com, inc. - * All rights reserved. 
- * SPDX-License-Identifier: BSD-3-Clause - * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause - * By Junnan Li - * Based on huggingface code base - * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert -''' - -import math -import os -import warnings -from dataclasses import dataclass -from typing import Optional, Tuple - -import torch -from torch import Tensor, device, dtype, nn -import torch.utils.checkpoint -from torch import nn -from torch.nn import CrossEntropyLoss -import torch.nn.functional as F - -from transformers.activations import ACT2FN -from transformers.file_utils import ( - ModelOutput, -) -from transformers.modeling_outputs import ( - BaseModelOutputWithPastAndCrossAttentions, - BaseModelOutputWithPoolingAndCrossAttentions, - CausalLMOutputWithCrossAttentions, - MaskedLMOutput, - MultipleChoiceModelOutput, - NextSentencePredictorOutput, - QuestionAnsweringModelOutput, - SequenceClassifierOutput, - TokenClassifierOutput, -) -from transformers.modeling_utils import ( - PreTrainedModel, - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - prune_linear_layer, -) -from transformers.utils import logging -from transformers.models.bert.configuration_bert import BertConfig - - -logger = logging.get_logger(__name__) - - -class BertEmbeddings(nn.Module): - """Construct the embeddings from word and position embeddings.""" - - def __init__(self, config): - super().__init__() - self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) - self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) - - # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load - # any TensorFlow checkpoint file - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - # position_ids (1, len position emb) is contiguous in memory and exported when serialized - self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - - self.config = config - - def forward( - self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 - ): - if input_ids is not None: - input_shape = input_ids.size() - else: - input_shape = inputs_embeds.size()[:-1] - - seq_length = input_shape[1] - - if position_ids is None: - position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] - - if inputs_embeds is None: - inputs_embeds = self.word_embeddings(input_ids) - - embeddings = inputs_embeds - - if self.position_embedding_type == "absolute": - position_embeddings = self.position_embeddings(position_ids) - embeddings += position_embeddings - embeddings = self.LayerNorm(embeddings) - embeddings = self.dropout(embeddings) - return embeddings - - -class BertSelfAttention(nn.Module): - def __init__(self, config, is_cross_attention): - super().__init__() - self.config = config - if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): - raise ValueError( - "The hidden size (%d) is not a multiple of the number of attention " - "heads (%d)" % (config.hidden_size, config.num_attention_heads) - ) - - self.num_attention_heads = config.num_attention_heads - self.attention_head_size = int(config.hidden_size / 
config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - - self.query = nn.Linear(config.hidden_size, self.all_head_size) - if is_cross_attention: - self.key = nn.Linear(config.encoder_width, self.all_head_size) - self.value = nn.Linear(config.encoder_width, self.all_head_size) - else: - self.key = nn.Linear(config.hidden_size, self.all_head_size) - self.value = nn.Linear(config.hidden_size, self.all_head_size) - - self.dropout = nn.Dropout(config.attention_probs_dropout_prob) - self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": - self.max_position_embeddings = config.max_position_embeddings - self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) - self.save_attention = False - - def save_attn_gradients(self, attn_gradients): - self.attn_gradients = attn_gradients - - def get_attn_gradients(self): - return self.attn_gradients - - def save_attention_map(self, attention_map): - self.attention_map = attention_map - - def get_attention_map(self): - return self.attention_map - - def transpose_for_scores(self, x): - new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) - x = x.view(*new_x_shape) - return x.permute(0, 2, 1, 3) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_value=None, - output_attentions=False, - ): - mixed_query_layer = self.query(hidden_states) - - # If this is instantiated as a cross-attention module, the keys - # and values come from an encoder; the attention mask needs to be - # such that the encoder's padding tokens are not attended to. - is_cross_attention = encoder_hidden_states is not None - - if is_cross_attention: - key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) - value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) - attention_mask = encoder_attention_mask - elif past_key_value is not None: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - key_layer = torch.cat([past_key_value[0], key_layer], dim=2) - value_layer = torch.cat([past_key_value[1], value_layer], dim=2) - else: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - - query_layer = self.transpose_for_scores(mixed_query_layer) - - past_key_value = (key_layer, value_layer) - - # Take the dot product between "query" and "key" to get the raw attention scores. 
- attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) - - if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": - seq_length = hidden_states.size()[1] - position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) - position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) - distance = position_ids_l - position_ids_r - positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) - positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility - - if self.position_embedding_type == "relative_key": - relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) - attention_scores = attention_scores + relative_position_scores - elif self.position_embedding_type == "relative_key_query": - relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) - relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) - attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key - - attention_scores = attention_scores / math.sqrt(self.attention_head_size) - if attention_mask is not None: - # Apply the attention mask is (precomputed for all layers in BertModel forward() function) - attention_scores = attention_scores + attention_mask - - # Normalize the attention scores to probabilities. - attention_probs = nn.Softmax(dim=-1)(attention_scores) - - if is_cross_attention and self.save_attention: - self.save_attention_map(attention_probs) - attention_probs.register_hook(self.save_attn_gradients) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. 
- attention_probs_dropped = self.dropout(attention_probs) - - # Mask heads if we want to - if head_mask is not None: - attention_probs_dropped = attention_probs_dropped * head_mask - - context_layer = torch.matmul(attention_probs_dropped, value_layer) - - context_layer = context_layer.permute(0, 2, 1, 3).contiguous() - new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) - context_layer = context_layer.view(*new_context_layer_shape) - - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) - - outputs = outputs + (past_key_value,) - return outputs - - -class BertSelfOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor): - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class BertAttention(nn.Module): - def __init__(self, config, is_cross_attention=False): - super().__init__() - self.self = BertSelfAttention(config, is_cross_attention) - self.output = BertSelfOutput(config) - self.pruned_heads = set() - - def prune_heads(self, heads): - if len(heads) == 0: - return - heads, index = find_pruneable_heads_and_indices( - heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads - ) - - # Prune linear layers - self.self.query = prune_linear_layer(self.self.query, index) - self.self.key = prune_linear_layer(self.self.key, index) - self.self.value = prune_linear_layer(self.self.value, index) - self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) - - # Update hyper params and store pruned heads - self.self.num_attention_heads = self.self.num_attention_heads - len(heads) - self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads - self.pruned_heads = self.pruned_heads.union(heads) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_value=None, - output_attentions=False, - ): - self_outputs = self.self( - hidden_states, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - ) - attention_output = self.output(self_outputs[0], hidden_states) - outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them - return outputs - - -class BertIntermediate(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.intermediate_size) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - return hidden_states - - -class BertOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.intermediate_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states, input_tensor): - hidden_states = self.dense(hidden_states) - hidden_states = 
self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class BertLayer(nn.Module): - def __init__(self, config, layer_num): - super().__init__() - self.config = config - self.chunk_size_feed_forward = config.chunk_size_feed_forward - self.seq_len_dim = 1 - self.attention = BertAttention(config) - self.layer_num = layer_num - if self.config.add_cross_attention: - self.crossattention = BertAttention(config, is_cross_attention=self.config.add_cross_attention) - self.intermediate = BertIntermediate(config) - self.output = BertOutput(config) - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_value=None, - output_attentions=False, - mode=None, - ): - # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 - self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None - self_attention_outputs = self.attention( - hidden_states, - attention_mask, - head_mask, - output_attentions=output_attentions, - past_key_value=self_attn_past_key_value, - ) - attention_output = self_attention_outputs[0] - - outputs = self_attention_outputs[1:-1] - present_key_value = self_attention_outputs[-1] - - if mode=='multimodal': - assert encoder_hidden_states is not None, "encoder_hidden_states must be given for cross-attention layers" - - cross_attention_outputs = self.crossattention( - attention_output, - attention_mask, - head_mask, - encoder_hidden_states, - encoder_attention_mask, - output_attentions=output_attentions, - ) - attention_output = cross_attention_outputs[0] - outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights - layer_output = apply_chunking_to_forward( - self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output - ) - outputs = (layer_output,) + outputs - - outputs = outputs + (present_key_value,) - - return outputs - - def feed_forward_chunk(self, attention_output): - intermediate_output = self.intermediate(attention_output) - layer_output = self.output(intermediate_output, attention_output) - return layer_output - - -class BertEncoder(nn.Module): - def __init__(self, config): - super().__init__() - self.config = config - self.layer = nn.ModuleList([BertLayer(config,i) for i in range(config.num_hidden_layers)]) - self.gradient_checkpointing = False - - def forward( - self, - hidden_states, - attention_mask=None, - head_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_values=None, - use_cache=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, - mode='multimodal', - ): - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None - - next_decoder_cache = () if use_cache else None - - for i in range(self.config.num_hidden_layers): - layer_module = self.layer[i] - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - layer_head_mask = head_mask[i] if head_mask is not None else None - past_key_value = past_key_values[i] if past_key_values is not None else None - - if self.gradient_checkpointing and self.training: - - if use_cache: - logger.warn( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
- ) - use_cache = False - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs, past_key_value, output_attentions) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(layer_module), - hidden_states, - attention_mask, - layer_head_mask, - encoder_hidden_states, - encoder_attention_mask, - mode=mode, - ) - else: - layer_outputs = layer_module( - hidden_states, - attention_mask, - layer_head_mask, - encoder_hidden_states, - encoder_attention_mask, - past_key_value, - output_attentions, - mode=mode, - ) - - hidden_states = layer_outputs[0] - if use_cache: - next_decoder_cache += (layer_outputs[-1],) - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple( - v - for v in [ - hidden_states, - next_decoder_cache, - all_hidden_states, - all_self_attentions, - all_cross_attentions, - ] - if v is not None - ) - return BaseModelOutputWithPastAndCrossAttentions( - last_hidden_state=hidden_states, - past_key_values=next_decoder_cache, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - cross_attentions=all_cross_attentions, - ) - - -class BertPooler(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.activation = nn.Tanh() - - def forward(self, hidden_states): - # We "pool" the model by simply taking the hidden state corresponding - # to the first token. - first_token_tensor = hidden_states[:, 0] - pooled_output = self.dense(first_token_tensor) - pooled_output = self.activation(pooled_output) - return pooled_output - - -class BertPredictionHeadTransform(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - if isinstance(config.hidden_act, str): - self.transform_act_fn = ACT2FN[config.hidden_act] - else: - self.transform_act_fn = config.hidden_act - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - - def forward(self, hidden_states): - hidden_states = self.dense(hidden_states) - hidden_states = self.transform_act_fn(hidden_states) - hidden_states = self.LayerNorm(hidden_states) - return hidden_states - - -class BertLMPredictionHead(nn.Module): - def __init__(self, config): - super().__init__() - self.transform = BertPredictionHeadTransform(config) - - # The output weights are the same as the input embeddings, but there is - # an output-only bias for each token. - self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) - - self.bias = nn.Parameter(torch.zeros(config.vocab_size)) - - # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` - self.decoder.bias = self.bias - - def forward(self, hidden_states): - hidden_states = self.transform(hidden_states) - hidden_states = self.decoder(hidden_states) - return hidden_states - - -class BertOnlyMLMHead(nn.Module): - def __init__(self, config): - super().__init__() - self.predictions = BertLMPredictionHead(config) - - def forward(self, sequence_output): - prediction_scores = self.predictions(sequence_output) - return prediction_scores - - -class BertPreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. 
- """ - - config_class = BertConfig - base_model_prefix = "bert" - _keys_to_ignore_on_load_missing = [r"position_ids"] - - def _init_weights(self, module): - """ Initialize the weights """ - if isinstance(module, (nn.Linear, nn.Embedding)): - # Slightly different from the TF version which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - if isinstance(module, nn.Linear) and module.bias is not None: - module.bias.data.zero_() - - -class BertModel(BertPreTrainedModel): - """ - The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of - cross-attention is added between the self-attention layers, following the architecture described in `Attention is - all you need `__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, - Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. - argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an - input to the forward pass. - """ - - def __init__(self, config, add_pooling_layer=True): - super().__init__(config) - self.config = config - - self.embeddings = BertEmbeddings(config) - - self.encoder = BertEncoder(config) - - self.pooler = BertPooler(config) if add_pooling_layer else None - - self.init_weights() - - - def get_input_embeddings(self): - return self.embeddings.word_embeddings - - def set_input_embeddings(self, value): - self.embeddings.word_embeddings = value - - def _prune_heads(self, heads_to_prune): - """ - Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base - class PreTrainedModel - """ - for layer, heads in heads_to_prune.items(): - self.encoder.layer[layer].attention.prune_heads(heads) - - - def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor: - """ - Makes broadcastable attention and causal masks so that future and masked tokens are ignored. - - Arguments: - attention_mask (:obj:`torch.Tensor`): - Mask with ones indicating tokens to attend to, zeros for tokens to ignore. - input_shape (:obj:`Tuple[int]`): - The shape of the input to the model. - device: (:obj:`torch.device`): - The device of the input to the model. - - Returns: - :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. - """ - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. 
- if attention_mask.dim() == 3: - extended_attention_mask = attention_mask[:, None, :, :] - elif attention_mask.dim() == 2: - # Provided a padding mask of dimensions [batch_size, seq_length] - # - if the model is a decoder, apply a causal mask in addition to the padding mask - # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] - if is_decoder: - batch_size, seq_length = input_shape - - seq_ids = torch.arange(seq_length, device=device) - causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] - # in case past_key_values are used we need to add a prefix ones mask to the causal mask - # causal and attention masks must have same type with pytorch version < 1.3 - causal_mask = causal_mask.to(attention_mask.dtype) - - if causal_mask.shape[1] < attention_mask.shape[1]: - prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] - causal_mask = torch.cat( - [ - torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), - causal_mask, - ], - axis=-1, - ) - - extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] - else: - extended_attention_mask = attention_mask[:, None, None, :] - else: - raise ValueError( - "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( - input_shape, attention_mask.shape - ) - ) - - # Since attention_mask is 1.0 for positions we want to attend and 0.0 for - # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. - # Since we are adding it to the raw scores before the softmax, this is - # effectively the same as removing these entirely. - extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility - extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 - return extended_attention_mask - - def forward( - self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - encoder_embeds=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - past_key_values=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - is_decoder=False, - mode='multimodal', - ): - r""" - encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if - the model is configured as a decoder. - encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in - the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): - Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
- If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` - (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` - instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. - use_cache (:obj:`bool`, `optional`): - If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up - decoding (see :obj:`past_key_values`). - """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if is_decoder: - use_cache = use_cache if use_cache is not None else self.config.use_cache - else: - use_cache = False - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = input_ids.size() - batch_size, seq_length = input_shape - device = input_ids.device - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - batch_size, seq_length = input_shape - device = inputs_embeds.device - elif encoder_embeds is not None: - input_shape = encoder_embeds.size()[:-1] - batch_size, seq_length = input_shape - device = encoder_embeds.device - else: - raise ValueError("You have to specify either input_ids or inputs_embeds or encoder_embeds") - - # past_key_values_length - past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 - - if attention_mask is None: - attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) - - # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] - # ourselves in which case we just need to make it broadcastable to all heads. 
- extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, - device, is_decoder) - - # If a 2D or 3D attention mask is provided for the cross-attention - # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] - if encoder_hidden_states is not None: - if type(encoder_hidden_states) == list: - encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size() - else: - encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() - encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) - - if type(encoder_attention_mask) == list: - encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask] - elif encoder_attention_mask is None: - encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) - encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) - else: - encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) - else: - encoder_extended_attention_mask = None - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x n_heads x N x N - # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] - # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] - head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - - if encoder_embeds is None: - embedding_output = self.embeddings( - input_ids=input_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - past_key_values_length=past_key_values_length, - ) - else: - embedding_output = encoder_embeds - - encoder_outputs = self.encoder( - embedding_output, - attention_mask=extended_attention_mask, - head_mask=head_mask, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_extended_attention_mask, - past_key_values=past_key_values, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - mode=mode, - ) - sequence_output = encoder_outputs[0] - pooled_output = self.pooler(sequence_output) if self.pooler is not None else None - - if not return_dict: - return (sequence_output, pooled_output) + encoder_outputs[1:] - - return BaseModelOutputWithPoolingAndCrossAttentions( - last_hidden_state=sequence_output, - pooler_output=pooled_output, - past_key_values=encoder_outputs.past_key_values, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - cross_attentions=encoder_outputs.cross_attentions, - ) - - - -class BertLMHeadModel(BertPreTrainedModel): - - _keys_to_ignore_on_load_unexpected = [r"pooler"] - _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] - - def __init__(self, config): - super().__init__(config) - - self.bert = BertModel(config, add_pooling_layer=False) - self.cls = BertOnlyMLMHead(config) - - self.init_weights() - - def get_output_embeddings(self): - return self.cls.predictions.decoder - - def set_output_embeddings(self, new_embeddings): - self.cls.predictions.decoder = new_embeddings - - def forward( - self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - labels=None, - past_key_values=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - 
return_logits=False, - is_decoder=True, - reduction='mean', - mode='multimodal', - ): - r""" - encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if - the model is configured as a decoder. - encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in - the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): - Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in - ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are - ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` - past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): - Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. - If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` - (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` - instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. - use_cache (:obj:`bool`, `optional`): - If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up - decoding (see :obj:`past_key_values`). 
- Returns: - Example:: - >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig - >>> import torch - >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') - >>> config = BertConfig.from_pretrained("bert-base-cased") - >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) - >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") - >>> outputs = model(**inputs) - >>> prediction_logits = outputs.logits - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if labels is not None: - use_cache = False - - outputs = self.bert( - input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_attention_mask, - past_key_values=past_key_values, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - is_decoder=is_decoder, - mode=mode, - ) - - sequence_output = outputs[0] - prediction_scores = self.cls(sequence_output) - - if return_logits: - return prediction_scores[:, :-1, :].contiguous() - - lm_loss = None - if labels is not None: - # we are doing next-token prediction; shift prediction scores and input ids by one - shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() - labels = labels[:, 1:].contiguous() - loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) - lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) - if reduction=='none': - lm_loss = lm_loss.view(prediction_scores.size(0),-1).sum(1) - - if not return_dict: - output = (prediction_scores,) + outputs[2:] - return ((lm_loss,) + output) if lm_loss is not None else output - - return CausalLMOutputWithCrossAttentions( - loss=lm_loss, - logits=prediction_scores, - past_key_values=outputs.past_key_values, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - cross_attentions=outputs.cross_attentions, - ) - - def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): - input_shape = input_ids.shape - # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly - if attention_mask is None: - attention_mask = input_ids.new_ones(input_shape) - - # cut decoder_input_ids if past is used - if past is not None: - input_ids = input_ids[:, -1:] - - return { - "input_ids": input_ids, - "attention_mask": attention_mask, - "past_key_values": past, - "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), - "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), - "is_decoder": True, - } - - def _reorder_cache(self, past, beam_idx): - reordered_past = () - for layer_past in past: - reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) - return reordered_past diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Protocol/test_SecretSharing.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Protocol/test_SecretSharing.py deleted file mode 100644 index 0ea58a574b0f1b444cf182d89927842e98c07837..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/SelfTest/Protocol/test_SecretSharing.py +++ /dev/null @@ -1,267 +0,0 @@ -# -# SelfTest/Protocol/test_secret_sharing.py: 
Self-test for secret sharing protocols -# -# =================================================================== -# -# Copyright (c) 2014, Legrandin -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# =================================================================== - -from unittest import main, TestCase, TestSuite -from binascii import unhexlify, hexlify - -from Crypto.Util.py3compat import * -from Crypto.SelfTest.st_common import list_test_cases - -from Crypto.Protocol.SecretSharing import Shamir, _Element, \ - _mult_gf2, _div_gf2 - -class GF2_Tests(TestCase): - - def test_mult_gf2(self): - # Prove mult by zero - x = _mult_gf2(0,0) - self.assertEqual(x, 0) - - # Prove mult by unity - x = _mult_gf2(34, 1) - self.assertEqual(x, 34) - - z = 3 # (x+1) - y = _mult_gf2(z, z) - self.assertEqual(y, 5) # (x+1)^2 = x^2 + 1 - y = _mult_gf2(y, z) - self.assertEqual(y, 15) # (x+1)^3 = x^3 + x^2 + x + 1 - y = _mult_gf2(y, z) - self.assertEqual(y, 17) # (x+1)^4 = x^4 + 1 - - # Prove linearity works - comps = [1, 4, 128, 2**34] - sum_comps = 1+4+128+2**34 - y = 908 - z = _mult_gf2(sum_comps, y) - w = 0 - for x in comps: - w ^= _mult_gf2(x, y) - self.assertEqual(w, z) - - def test_div_gf2(self): - from Crypto.Util.number import size as deg - - x, y = _div_gf2(567, 7) - self.assertTrue(deg(y) < deg(7)) - - w = _mult_gf2(x, 7) ^ y - self.assertEqual(567, w) - - x, y = _div_gf2(7, 567) - self.assertEqual(x, 0) - self.assertEqual(y, 7) - -class Element_Tests(TestCase): - - def test1(self): - # Test encondings - e = _Element(256) - self.assertEqual(int(e), 256) - self.assertEqual(e.encode(), bchr(0)*14 + b("\x01\x00")) - - e = _Element(bchr(0)*14 + b("\x01\x10")) - self.assertEqual(int(e), 0x110) - self.assertEqual(e.encode(), bchr(0)*14 + b("\x01\x10")) - - # Only 16 byte string are a valid encoding - self.assertRaises(ValueError, _Element, bchr(0)) - - def test2(self): - # Test addition - e = _Element(0x10) - f = _Element(0x0A) - self.assertEqual(int(e+f), 0x1A) - - def test3(self): - # Test multiplication - zero = _Element(0) - one = _Element(1) - two = _Element(2) - - x = _Element(6) * zero - self.assertEqual(int(x), 0) - - x = _Element(6) * one - self.assertEqual(int(x), 6) - - x = _Element(2**127) * two - self.assertEqual(int(x), 1 + 2 + 4 + 128) 
- - def test4(self): - # Test inversion - one = _Element(1) - - x = one.inverse() - self.assertEqual(int(x), 1) - - x = _Element(82323923) - y = x.inverse() - self.assertEqual(int(x * y), 1) - -class Shamir_Tests(TestCase): - - def test1(self): - # Test splitting - shares = Shamir.split(2, 3, bchr(90)*16) - self.assertEqual(len(shares), 3) - for index in range(3): - self.assertEqual(shares[index][0], index+1) - self.assertEqual(len(shares[index][1]), 16) - - def test2(self): - # Test recombine - from itertools import permutations - - test_vectors = ( - (2, "d9fe73909bae28b3757854c0af7ad405", - "1-594ae8964294174d95c33756d2504170", - "2-d897459d29da574eb40e93ec552ffe6e", - "3-5823de9bf0e068b054b5f07a28056b1b", - "4-db2c1f8bff46d748f795da995bd080cb"), - (2, "bf4f902d9a7efafd1f3ffd9291fd5de9", - "1-557bd3b0748064b533469722d1cc7935", - "2-6b2717164783c66d47cd28f2119f14d0", - "3-8113548ba97d58256bb4424251ae300c", - "4-179e9e5a218483ddaeda57539139cf04"), - (3, "ec96aa5c14c9faa699354cf1da74e904", - "1-64579fbf1908d66f7239bf6e2b4e41e1", - "2-6cd9428df8017b52322561e8c672ae3e", - "3-e418776ef5c0579bd9299277374806dd", - "4-ab3f77a0107398d23b323e581bb43f5d", - "5-23fe42431db2b41bd03ecdc7ea8e97ac"), - (3, "44cf249b68b80fcdc27b47be60c2c145", - "1-d6515a3905cd755119b86e311c801e31", - "2-16693d9ac9f10c254036ced5f8917fa3", - "3-84f74338a48476b99bf5e75a84d3a0d1", - "4-3fe8878dc4a5d35811cf3cbcd33dbe52", - "5-ad76f92fa9d0a9c4ca0c1533af7f6132"), - (5, "5398717c982db935d968eebe53a47f5a", - "1-be7be2dd4c068e7ef576aaa1b1c11b01", - "2-f821f5848441cb98b3eb467e2733ee21", - "3-25ee52f53e203f6e29a0297b5ab486b5", - "4-fc9fb58ef74dab947fbf9acd9d5d83cd", - "5-b1949cce46d81552e65f248d3f74cc5c", - "6-d64797f59977c4d4a7956ad916da7699", - "7-ab608a6546a8b9af8820ff832b1135c7"), - (5, "4a78db90fbf35da5545d2fb728e87596", - "1-08daf9a25d8aa184cfbf02b30a0ed6a0", - "2-dda28261e36f0b14168c2cf153fb734e", - "3-e9fdec5505d674a57f9836c417c1ecaa", - "4-4dce5636ae06dee42d2c82e65f06c735", - "5-3963dc118afc2ba798fa1d452b28ef00", - "6-6dfe6ff5b09e94d2f84c382b12f42424", - "7-6faea9d4d4a4e201bf6c90b9000630c3"), - (10, "eccbf6d66d680b49b073c4f1ddf804aa", - "01-7d8ac32fe4ae209ead1f3220fda34466", - "02-f9144e76988aad647d2e61353a6e96d5", - "03-b14c3b80179203363922d60760271c98", - "04-770bb2a8c28f6cee89e00f4d5cc7f861", - "05-6e3d7073ea368334ef67467871c66799", - "06-248792bc74a98ce024477c13c8fb5f8d", - "07-fcea4640d2db820c0604851e293d2487", - "08-2776c36fb714bb1f8525a0be36fc7dba", - "09-6ee7ac8be773e473a4bf75ee5f065762", - "10-33657fc073354cf91d4a68c735aacfc8", - "11-7645c65094a5868bf225c516fdee2d0c", - "12-840485aacb8226631ecd9c70e3018086"), - (10, "377e63bdbb5f7d4dc58a483d035212bb", - "01-32c53260103be431c843b1a633afe3bd", - "02-0107eb16cb8695084d452d2cc50bc7d6", - "03-df1e5c66cd755287fb0446faccd72a06", - "04-361bbcd5d40797f49dfa1898652da197", - "05-160d3ad1512f7dec7fd9344aed318591", - "06-659af6d95df4f25beca4fb9bfee3b7e8", - "07-37f3b208977bad50b3724566b72bfa9d", - "08-6c1de2dfc69c2986142c26a8248eb316", - "09-5e19220837a396bd4bc8cd685ff314c3", - "10-86e7b864fb0f3d628e46d50c1ba92f1c", - "11-065d0082c80b1aea18f4abe0c49df72e", - "12-84a09430c1d20ea9f388f3123c3733a3"), - ) - - def get_share(p): - pos = p.find('-') - return int(p[:pos]), unhexlify(p[pos + 1:]) - - for tv in test_vectors: - k = tv[0] - secret = unhexlify(tv[1]) - max_perms = 10 - for perm, shares_idx in enumerate(permutations(range(2, len(tv)), k)): - if perm > max_perms: - break - shares = [ get_share(tv[x]) for x in shares_idx ] - result = Shamir.combine(shares, True) - 
self.assertEqual(secret, result) - - def test3(self): - # Loopback split/recombine - secret = unhexlify(b("000102030405060708090a0b0c0d0e0f")) - - shares = Shamir.split(2, 3, secret) - - secret2 = Shamir.combine(shares[:2]) - self.assertEqual(secret, secret2) - - secret3 = Shamir.combine([ shares[0], shares[2] ]) - self.assertEqual(secret, secret3) - - def test4(self): - # Loopback split/recombine (SSSS) - secret = unhexlify(b("000102030405060708090a0b0c0d0e0f")) - - shares = Shamir.split(2, 3, secret, ssss=True) - - secret2 = Shamir.combine(shares[:2], ssss=True) - self.assertEqual(secret, secret2) - - def test5(self): - # Detect duplicate shares - secret = unhexlify(b("000102030405060708090a0b0c0d0e0f")) - - shares = Shamir.split(2, 3, secret) - self.assertRaises(ValueError, Shamir.combine, (shares[0], shares[0])) - - -def get_tests(config={}): - tests = [] - tests += list_test_cases(GF2_Tests) - tests += list_test_cases(Element_Tests) - tests += list_test_cases(Shamir_Tests) - return tests - -if __name__ == '__main__': - suite = lambda: TestSuite(get_tests()) - main(defaultTest='suite') - diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/edge_tts/__main__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/edge_tts/__main__.py deleted file mode 100644 index b36f114e5639826410741daa4d3c0e73911ece21..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/edge_tts/__main__.py +++ /dev/null @@ -1,8 +0,0 @@ -""" -__main__ for edge_tts. -""" - -from .util import main - -if __name__ == "__main__": - main() diff --git a/spaces/as-god/gsdf-Counterfeit-V2.5/app.py b/spaces/as-god/gsdf-Counterfeit-V2.5/app.py deleted file mode 100644 index 3e61c8452c0bc94ea6cf8e7fd4fab00c30fccba4..0000000000000000000000000000000000000000 --- a/spaces/as-god/gsdf-Counterfeit-V2.5/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/gsdf/Counterfeit-V2.5").launch() \ No newline at end of file diff --git a/spaces/atticus/image-text-retrival-huster/misc/evaluation.py b/spaces/atticus/image-text-retrival-huster/misc/evaluation.py deleted file mode 100644 index 47ea3e18886ca61f6d05d5ca7f099cd4719f129b..0000000000000000000000000000000000000000 --- a/spaces/atticus/image-text-retrival-huster/misc/evaluation.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -****************** COPYRIGHT AND CONFIDENTIALITY INFORMATION ****************** -Copyright (c) 2018 [Thomson Licensing] -All Rights Reserved -This program contains proprietary information which is a trade secret/business \ -secret of [Thomson Licensing] and is protected, even if ucpublished, under \ -applicable Copyright laws (including French droit d'auteur) and/or may be \ -subject to one or more patent(s). -Recipient is to retain this program in confidence and is not permitted to use \ -or make copies thereof other than as permitted in a written agreement with \ -[Thomson Licensing] unless otherwise expressly allowed by applicable laws or \ -by [Thomson Licensing] under express agreement. -Thomson Licensing is a company of the group TECHNICOLOR -******************************************************************************* -This scripts permits one to reproduce training and experiments of: - Engilberge, M., Chevallier, L., Pérez, P., & Cord, M. (2018, April). - Finding beans in burgers: Deep semantic-visual embedding with localization. - In Proceedings of CVPR (pp. 
3984-3993) - -Author: Martin Engilberge -""" -from scripts.postprocess import postprocess -import numpy as np - -from misc.utils import flatten - -def cosine_sim(A, B): - img_norm = np.linalg.norm(A, axis=1) - caps_norm = np.linalg.norm(B, axis=1) - - scores = np.dot(A, B.T) - - norms = np.dot(np.expand_dims(img_norm, 1), - np.expand_dims(caps_norm.T, 1).T) - - scores = (scores / norms) - - return scores - -def recallTopK(cap_enc, imgs_enc, imgs_path, method, ks=10, scores=None): - - if scores is None: - scores = cosine_sim(cap_enc, imgs_enc) - - # recall_imgs = [imgs_path[np.asnumpy(i)] for i in np.argsort(scores, axis=1)[0][::-1][:ks]] - recall_imgs = [imgs_path[i] for i in np.argsort(scores, axis=1)[0][::-1][:ks]] - postprocess(method, recall_imgs) - - return recall_imgs - -def recall_at_k_multi_cap(imgs_enc, caps_enc, ks=[1, 5, 10], scores=None): - if scores is None: - scores = cosine_sim(imgs_enc[::5, :], caps_enc) - - ranks = np.array([np.nonzero(np.in1d(row, np.arange(x * 5, x * 5 + 5, 1)))[0][0] - for x, row in enumerate(np.argsort(scores, axis=1)[:, ::-1])]) - - medr_caps_search = np.median(ranks) - - recall_caps_search = list() - - for k in [1, 5, 10]: - recall_caps_search.append( - (float(len(np.where(ranks < k)[0])) / ranks.shape[0]) * 100) - - ranks = np.array([np.nonzero(row == int(x / 5.0))[0][0] - for x, row in enumerate(np.argsort(scores.T, axis=1)[:, ::-1])]) - - medr_imgs_search = np.median(ranks) - - recall_imgs_search = list() - for k in ks: - recall_imgs_search.append( - (float(len(np.where(ranks < k)[0])) / ranks.shape[0]) * 100) - - return recall_caps_search, recall_imgs_search, medr_caps_search, medr_imgs_search - - -def avg_recall(imgs_enc, caps_enc): - """ Compute 5 fold recall on set of 1000 images """ - res = list() - if len(imgs_enc) % 5000 == 0: - max_iter = len(imgs_enc) - else: - max_iter = len(imgs_enc) - 5000 - - for i in range(0, max_iter, 5000): - imgs = imgs_enc[i:i + 5000] - caps = caps_enc[i:i + 5000] - res.append(recall_at_k_multi_cap(imgs, caps)) - - return [np.sum([x[i] for x in res], axis=0) / len(res) for i in range(len(res[0]))] - - -def eval_recall(imgs_enc, caps_enc): - - imgs_enc = np.vstack(flatten(imgs_enc)) - caps_enc = np.vstack(flatten(caps_enc)) - - res = avg_recall(imgs_enc, caps_enc) - - return res diff --git a/spaces/auto-academic/auto-draft/section_generator.py b/spaces/auto-academic/auto-draft/section_generator.py deleted file mode 100644 index 75cdde734c9de0ab165e8137c6e35d803281610a..0000000000000000000000000000000000000000 --- a/spaces/auto-academic/auto-draft/section_generator.py +++ /dev/null @@ -1,112 +0,0 @@ -# from utils.prompts import generate_paper_prompts, generate_keywords_prompts, generate_experiments_prompts, generate_bg_summary_prompts -from utils.prompts import generate_paper_prompts, generate_bg_summary_prompts -# from utils.gpt_interaction import get_responses #, extract_responses, extract_keywords, extract_json -from utils.figures import generate_random_figures -import time -import os -from utils.prompts import KEYWORDS_SYSTEM, SECTION_GENERATION_SYSTEM -from utils.gpt_interaction import get_gpt_responses -import json - -# three GPT-based content generator: -# 1. section_generation: used to generate main content of the paper -# 2. keywords_generation: used to generate a json output {key1: output1, key2: output2} for multiple purpose. -# 3. figure_generation: used to generate sample figures. -# all generator should return the token usage. 
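# Editor's illustrative sketch (not part of the original file): the comments above
# describe the shared contract that every generator returns the token usage of its
# GPT calls. Assuming the `section_generation` and `keywords_generation` functions
# defined below, a `paper` dict carrying a "title" key, and usage objects shaped
# like OpenAI usage dicts with a "total_tokens" field (an assumption, not shown in
# this file), a caller could accumulate cost roughly like this:
def draft_paper_sketch(paper, sections, save_to_path, model):
    """Hypothetical driver: pick keywords from the title, then generate each section."""
    total_tokens = 0
    keywords, usage = keywords_generation({"title": paper["title"]})
    total_tokens += usage["total_tokens"]  # assumed usage-dict shape
    for section in sections:  # e.g. ["introduction", "conclusion"] -- hypothetical names
        usage = section_generation(paper, section, save_to_path, model)
        total_tokens += usage["total_tokens"]  # assumed usage-dict shape
    return keywords, total_tokens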
- -MAX_ATTEMPTS = 6 - - -def section_generation_bg(paper, section, save_to_path, model): - """ - todo: this part should be revised - The main pipeline of generating a section. - 1. Generate prompts. - 2. Get responses from AI assistant. - 3. Extract the section text. - 4. Save the text to .tex file. - :return usage - """ - print(f"Generating {section}...") - prompts = generate_bg_summary_prompts(paper, section) - # gpt_response, usage = get_responses(prompts, model) - gpt_response, usage = get_gpt_responses(prompts, model) - output = gpt_response # extract_responses(gpt_response) - paper["body"][section] = output - tex_file = os.path.join(save_to_path, f"{section}.tex") - # tex_file = save_to_path + f"/{section}.tex" - if section == "abstract": - with open(tex_file, "w") as f: - f.write(r"\begin{abstract}") - with open(tex_file, "a") as f: - f.write(output) - with open(tex_file, "a") as f: - f.write(r"\end{abstract}") - else: - with open(tex_file, "w") as f: - f.write(f"\section{{{section.upper()}}}\n") - with open(tex_file, "a") as f: - f.write(output) - time.sleep(5) - print(f"{section} has been generated. Saved to {tex_file}.") - return usage - - -def section_generation(paper, section, save_to_path, model, research_field="machine learning"): - """ - The main pipeline of generating a section. - 1. Generate prompts. - 2. Get responses from AI assistant. - 3. Extract the section text. - 4. Save the text to .tex file. - :return usage - """ - prompts = generate_paper_prompts(paper, section) - output, usage = get_gpt_responses(SECTION_GENERATION_SYSTEM.format(research_field=research_field), prompts, - model=model, temperature=0.4) - paper["body"][section] = output - tex_file = os.path.join(save_to_path, f"{section}.tex") - with open(tex_file, "w", encoding="utf-8") as f: - f.write(output) - time.sleep(5) - return usage - - -def keywords_generation(input_dict, default_keywords=None): - """ - Input: - input_dict: a dictionary containing the title of a paper. - default_keywords: if anything went wrong, return this keywords. - - Output: - a dictionary including all keywords and their importance score. - - Input example: {"title": "The title of a Machine Learning Paper"} - Output Example: {"machine learning": 5, "reinforcement learning": 2} - """ - title = input_dict.get("title") - attempts_count = 0 - while (attempts_count < MAX_ATTEMPTS) and (title is not None): - try: - keywords, usage = get_gpt_responses(KEYWORDS_SYSTEM.format(min_refs_num=1, max_refs_num=10), title, - model="gpt-3.5-turbo", temperature=0.4) - print(keywords) - output = json.loads(keywords) - return output.keys(), usage - except json.decoder.JSONDecodeError: - attempts_count += 1 - time.sleep(10) - # Default references - print("Error: Keywords generation has failed. Return the default keywords.") - if default_keywords is None or isinstance(default_keywords, dict): - return {"machine learning": 10} - else: - return default_keywords - -# def figures_generation(paper, save_to_path, model): -# # todo: this function is not complete. 
-# prompts = generate_experiments_prompts(paper) -# gpt_response, usage = get_responses(prompts, model) -# list_of_methods = list(extract_json(gpt_response)) -# generate_random_figures(list_of_methods, os.path.join(save_to_path, "comparison.png")) -# return usage diff --git a/spaces/autumn8/selectModel/app-idea-4.py b/spaces/autumn8/selectModel/app-idea-4.py deleted file mode 100644 index 70fa233f1c80dc0bd4f144eb1d0ffc8413b44b4a..0000000000000000000000000000000000000000 --- a/spaces/autumn8/selectModel/app-idea-4.py +++ /dev/null @@ -1,705 +0,0 @@ -from transformers import TextClassificationPipeline -from transformers import AutoTokenizer -from transformers import pipeline -import evaluate -import gradio as gr -import torch -import random -from transformers.file_utils import is_tf_available, is_torch_available, is_torch_tpu_available -from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments -from datasets import load_metric -from sklearn.model_selection import train_test_split -import pandas as pd -import numpy as np -import streamlit as st -from textblob import TextBlob -from streamlit_extras.switch_page_button import switch_page -from transformers import YolosImageProcessor, YolosForObjectDetection -from PIL import Image -import torch -import requests -import numpy as np -import torchvision -from torchvision.io import read_image -from torchvision.utils import draw_bounding_boxes -from transformers import DetrImageProcessor, DetrForObjectDetection -from transformers import DetrImageProcessor, DetrForObjectDetection -from transformers import pipeline -import torch -from transformers import PegasusForConditionalGeneration, PegasusTokenizer - - -st.set_page_config(layout="wide") -def get_models(prompt): - #prompt = input("Enter your AI task idea:") - response = pipe(prompt) - print("AI Model Idea: ", prompt,"\n") - - x = pd.json_normalize(response[0]) - # x.nlargest(3,['score'])["label"].values - knowledge_base_tasks = ['depth-estimation', 'image-classification', 'image-segmentation', - 'image-to-image', 'object-detection', 'video-classification', - 'unconditional-image-generation', 'zero-shot-image-classification', - 'conversational', 'fill-mask', 'question-answering', - 'sentence-similarity', 'summarization', 'table-question-answering', - 'text-classification', 'text-generation', 'token-classification', - 'translation', 'zero-shot-classification'] - - temp = [] - for label_code in x.nlargest(3,['score'])["label"].values: - temp.append(label_code[6:]) - # temp - - cat_to_model = {} - top_cats = [] - - for i in range(len(temp)): - print("Possible Category ",i+1," : ",knowledge_base_tasks[int(temp[i])]) - print("Top three models for this category are:",models_list[models_list["pipeline_tag"] == knowledge_base_tasks[int(temp[i])]].nlargest(3,"downloads")["modelId"].values) - cat_to_model[knowledge_base_tasks[int(temp[i])]] = models_list[models_list["pipeline_tag"] == knowledge_base_tasks[int(temp[i])]].nlargest(3,"downloads")["modelId"].values - top_cats.append(knowledge_base_tasks[int(temp[i])]) - # models_list[models_list["pipeline_tag"] == "image-classification"].nlargest(3,"downloads")["modelId"].values - print() - print("Returning category-models dictionary..") - return top_cats,cat_to_model - - - -def get_top_3(top_cat): - - top_3_df = pd.read_csv("./Top_3_models.csv") - top_3 = [] - for i in range(top_3_df.shape[0]): - if top_3_df["Category"].iloc[i].lower() == top_cat: - top_3.append(top_3_df["Model_1"].iloc[i]) - 
top_3.append(top_3_df["Model_2"].iloc[i]) - top_3.append(top_3_df["Model_3"].iloc[i]) - break - return top_3 - - - -def get_response(input_text,model_name): - torch_device = 'cuda' if torch.cuda.is_available() else 'cpu' - tokenizer = PegasusTokenizer.from_pretrained(model_name) - model = PegasusForConditionalGeneration.from_pretrained(model_name).to(torch_device) - batch = tokenizer([input_text],truncation=True,padding='longest',max_length=1024, return_tensors="pt").to(torch_device) - gen_out = model.generate(**batch,max_length=128,num_beams=5, num_return_sequences=1, temperature=1.5) - output_text = tokenizer.batch_decode(gen_out, skip_special_tokens=True) - return output_text - - -def summarizer (models, data): - model_Eval = {} - for i in range (len(models)): - # print(models[i]) - if models[i] == 'tuner007/pegasus_summarizer': - model_name = 'tuner007/pegasus_summarizer' - - result = get_response(data,model_name) - rouge = evaluate.load('rouge') - # print("345",rouge.compute(predictions=[result],references=[data])) - print(type(result), type([data])) - quality = rouge.compute(predictions=[result[0]],references=[data]) - model_Eval[models[i]] = {"Score":quality,"Result": result} - else: - summarizer_model = pipeline("summarization", model = models[i]) - print(models[i], summarizer_model(data)) - try: - result = summarizer_model(data)[0]["summary_text"] - rouge = evaluate.load('rouge') - # print("345",rouge.compute(predictions=[result],references=[data])) - quality = rouge.compute(predictions=[result],references=[data]) - model_Eval[models[i]] = {"Score":quality,"Result": result} - except: - print("Model {} has issues.".format(models[i])) - - return model_Eval - - - - -def best_model (analysis, data): - best_model_score = 0 - best_model_name = "" - best_model_result = "" - temp2 = 0 - for model in analysis.keys(): - temp1 = analysis[model]["Score"]["rougeLsum"] - if temp1 > temp2: - temp2 = analysis[model]["Score"]["rougeLsum"] - best_model_score = analysis[model]["Score"] - best_model_name = model - best_model_result = analysis[model]["Result"] - - return best_model_name, best_model_score,data[:50],best_model_result.replace("\n","") - - - -def text_summarization(): - top_models = get_top_3("summarization") -# st.write("Upload your file: ") -# uploaded_files = "" -# uploaded_files = st.file_uploader("Choose your file", accept_multiple_files=True) - - - - - option = st.selectbox( - 'What text would you like AI to summarize for you?', - ("Choose text files below:",'How to Win friends - Text', 'mocktext', '--')) #add 2 other options of files here - - if option == 'How to Win friends - Text' or option == 'mocktext' or option == '--':### update book text files here - st.write('You selected:', option) - - if option == 'How to Win friends - Text': # add text - name = "How_to_win_friends.txt" - st.write("Selected file for analyis is: How_to_win_friends.txt") - - if option == 'mocktext': - name = "mocktext.txt" - st.write("Selected file for analyis is: mocktext.txt") - - if option == '--': - name = "--" - st.write("--") - - - - if st.button("Done"): - global file_data -# st.write("filename:", uploaded_files) -# for uploaded_file in uploaded_files: -# # print("here") -# file_data = open(uploaded_file.name,encoding="utf8").read() -# st.write("filename:", uploaded_file.name) -# # st.write(file_data[:500]) -# # print("before summarizer") -# print(file_data[:50]) - file_data = open(name,encoding="utf8").read() - - analysis = summarizer(models = top_models, data = file_data[:500]) - - x,c,v,b = 
best_model(analysis,file_data[:500]) -# st.write("Best model for Task: ",z) - - st.markdown(f'{"Best Model with Summarization Results"}
      ', unsafe_allow_html=True) - st.write("\nBest model name: ",x) -# st.write("\nBest model Score: ",c) - - st.write("Best Model Rouge Scores: ") - st.write("Rouge 1 Score: ",c["rouge1"]) - st.write("Rouge 2 Score: ",c["rouge2"]) - st.write("Rouge L Score: ",c["rougeL"]) - st.write("Rouge LSum Score: ",c["rougeLsum"]) - - st.write("\nOriginal Data first 50 characters: ", v) - st.write("\nBest Model Result: ",b) - - -# print("between summarizer analysis") - st.markdown(f'
{"Summarization Results for Model 1"}
      ', unsafe_allow_html=True) -# st.write("Summarization Results for Model 1") - st.write("Model name: facebook/bart-large-cnn") - st.write("Rouge Scores: ") - st.write("Rouge 1 Score: ",analysis["facebook/bart-large-cnn"]["Score"]["rouge1"]) - st.write("Rouge 2 Score: ",analysis["facebook/bart-large-cnn"]["Score"]["rouge2"]) - st.write("Rouge L Score: ",analysis["facebook/bart-large-cnn"]["Score"]["rougeL"]) - st.write(f"Rouge LSum Score: ",analysis["facebook/bart-large-cnn"]["Score"]["rougeLsum"]) - st.write("Result: ", analysis["facebook/bart-large-cnn"]["Result"]) - - st.markdown(f'
{"Summarization Results for Model 2"}
      ', unsafe_allow_html=True) -# st.write("Summarization Results for Model 2") - st.write("Model name: tuner007/pegasus_summarizer") - st.write("Rouge Scores: ") - st.write("Rouge 1 Score: ",analysis["tuner007/pegasus_summarizer"]["Score"]["rouge1"]) - st.write("Rouge 2 Score: ",analysis["tuner007/pegasus_summarizer"]["Score"]["rouge2"]) - st.write("Rouge L Score: ",analysis["tuner007/pegasus_summarizer"]["Score"]["rougeL"]) - st.write("Rouge LSum Score: ",analysis["tuner007/pegasus_summarizer"]["Score"]["rougeLsum"]) - st.write("Result: ", analysis["tuner007/pegasus_summarizer"]["Result"][0]) - - - - st.markdown(f'
{"Summarization Results for Model 3"}
      ', unsafe_allow_html=True) -# st.write("Summarization Results for Model 3") - st.write("Model name: sshleifer/distilbart-cnn-12-6") - st.write("Rouge Scores: ") - st.write("Rouge 1 Score: ",analysis["sshleifer/distilbart-cnn-12-6"]["Score"]["rouge1"]) - st.write("Rouge 2 Score: ",analysis["sshleifer/distilbart-cnn-12-6"]["Score"]["rouge2"]) - st.write("Rouge L Score: ",analysis["sshleifer/distilbart-cnn-12-6"]["Score"]["rougeL"]) - st.write("Rouge LSum Score: ",analysis["sshleifer/distilbart-cnn-12-6"]["Score"]["rougeLsum"]) - - st.write("Result: ", analysis["sshleifer/distilbart-cnn-12-6"]["Result"]) - - - - -#OBJECT DETECTION - -def yolo_tiny(name): - image = read_image(name) - - model = YolosForObjectDetection.from_pretrained('hustvl/yolos-tiny') - image_processor = YolosImageProcessor.from_pretrained("hustvl/yolos-tiny") - - inputs = image_processor(images=image, return_tensors="pt") - outputs = model(**inputs) - - # model predicts bounding boxes and corresponding COCO classes - logits = outputs.logits - bboxes = outputs.pred_boxes - - - # print results - target_sizes = torch.tensor([image.shape[::-1][:2]]) - - results = image_processor.post_process_object_detection(outputs, threshold=0.7, target_sizes=target_sizes)[0] - - label_ = [] - bboxes = [] - - for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): - box = [round(i, 2) for i in box.tolist()] - print( - f"Detected {model.config.id2label[label.item()]} with confidence " - f"{round(score.item(), 3)} at location {box}" - ) - - label_.append(model.config.id2label[label.item()]) - bboxes.append(np.asarray(box,dtype="int")) - bboxes = torch.tensor(bboxes, dtype=torch.int) - - img=draw_bounding_boxes(image, bboxes,labels = label_, width=3) - img = torchvision.transforms.ToPILImage()(img) - return img -# img.show() - - - -def resnet_101(name): - image = read_image(name) - processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-101") - model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-101") - - inputs = processor(images=image, return_tensors="pt") - outputs = model(**inputs) - - # convert outputs (bounding boxes and class logits) to COCO API - # let's only keep detections with score > 0.9 - target_sizes = torch.tensor([image.shape[::-1][:2]]) - results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.7)[0] - label_ = [] - bboxes = [] - for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): - box = [round(i, 2) for i in box.tolist()] - print( - f"Detected {model.config.id2label[label.item()]} with confidence " - f"{round(score.item(), 3)} at location {box}") - label_.append(model.config.id2label[label.item()]) - bboxes.append(np.asarray(box,dtype="int")) - bboxes = torch.tensor(bboxes, dtype=torch.int) - - - bboxes = torch.tensor(bboxes, dtype=torch.int) - - img=draw_bounding_boxes(image, bboxes,labels = label_, width=3) - img = torchvision.transforms.ToPILImage()(img) - return img - - - - - -def resnet_50(name): - image = read_image(name) - processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50") - model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50") - - inputs = processor(images=image, return_tensors="pt") - outputs = model(**inputs) - - # convert outputs (bounding boxes and class logits) to COCO API - # let's only keep detections with score > 0.9 - target_sizes = torch.tensor([image.shape[::-1][:2]]) - results = processor.post_process_object_detection(outputs, 
target_sizes=target_sizes, threshold=0.7)[0] - label_ = [] - bboxes = [] - for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): - box = [round(i, 2) for i in box.tolist()] - print( - f"Detected {model.config.id2label[label.item()]} with confidence " - f"{round(score.item(), 3)} at location {box}" - ) - label_.append(model.config.id2label[label.item()]) - bboxes.append(np.asarray(box,dtype="int")) - bboxes = torch.tensor(bboxes, dtype=torch.int) - - bboxes = torch.tensor(bboxes, dtype=torch.int) - - img=draw_bounding_boxes(image, bboxes,labels = label_, width=3) - img = torchvision.transforms.ToPILImage()(img) - return img - - - -def object_detection(): -# st.write("Upload your image: ") -# uploaded_files = "" -# uploaded_files = st.file_uploader("Choose a image file", accept_multiple_files=True) - - option = st.selectbox( - 'What image you want for analysis?', - ("Choose an image for object detection analysis from the options below:",'Cat and Dog', '2 lazy cats chilling on a couch', 'An astronaut riding wild horse')) - - if option == 'Cat and Dog' or option == '2 lazy cats chilling on a couch' or option == 'An astronaut riding wild horse': - st.write('You selected:', option) - - if option == 'Cat and Dog': - name = "cat_dog.jpg" - st.image("cat_dog.jpg") - - if option == '2 lazy cats chilling on a couch': - name = "cat_remote.jpg" - st.image("cat_remote.jpg") - - if option == 'An astronaut riding wild horse': - name = "astronaut_rides_horse.png" - st.image("astronaut_rides_horse.png") - - if st.button("Done"): - # global file_data -# st.write("filename:", uploaded_files) -# for uploaded_file in uploaded_files: - # print("here") - # file_data = open(uploaded_file.name).read() - st.write("filename:", name) -# name = uploaded_file.name - st.image([yolo_tiny(name),resnet_101(name),resnet_50(name)],caption=["hustvl/yolos-tiny","facebook/detr-resnet-101","facebook/detr-resnet-50"]) - - -def task_categorization_model_predictions(): - st.image("./panelup.png") - - # st.title("Text Analysis App") - - data = "" - - classifier = pipeline("zero-shot-classification",model="facebook/bart-large-mnli") - - global check - - st.markdown(f'
{"Write down below the description of your AI application in a few sentences:"}
      ', unsafe_allow_html=True) - - prompt = st.text_input(" ") - - st.write("") - st.write("") - - if prompt != "": - # sbert_saved_model = torch.load("Sbert_saved_model", map_location=torch.device('cpu')).to("cpu") - # model = sbert_saved_model.to("cpu") - # tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/all-mpnet-base-v2") - # pipe = TextClassificationPipeline(model= model, tokenizer=tokenizer, return_all_scores=True) - # # outputs a list of dicts like [[{'label': 'NEGATIVE', 'score': 0.0001223755971295759}, {'label': 'POSITIVE', 'score': 0.9998776316642761}]] - - # # prompt = ["What is the the best ai for putting text report into data table?","How can I generate car sales agreement with ai model?","AI model to detect burglar on 48 hours of cctv video footage","I need Ai model help me with rewriting 50 financial statements emails into one summary report ?","I need a model for extracting person from an image"] - # # responses = pipe(prompt) - - - # models_list = pd.read_csv("models.csv") - # # st.write(get_top_3(prompt)) - - # top_cat, top_models = get_top_3(prompt) - # # prompt = input("Enter your AI task idea:") - # # top_cats,cat_to_models = get_models(prompt) - - # # top_models = cat_to_models[top_cats[0]] - - # top_cat = " " + top_cat[0].upper() + top_cat[1:] - - - - st.markdown(f'
{"Recognized AI Domain: "}
      ', unsafe_allow_html=True) - - domains = ["Computer Vision Task","Natural Language Processing Problem","Audio Operations Problem","Tabular Data Task","Reinforcement Learning Problem","Time Series Forecasting Problem"] - - - - #st.write(classifier(prompt, domains)) - domain = classifier(prompt, domains)["labels"][0] - - st.markdown(f'
{domain}
      ', unsafe_allow_html=True) - # st.write("Recommended AI Domain Type: ",top_cat) - check = 0 - if st.button("This seems accurate"): - check = 1 - if st.button("Show me other likely category recommendations:"): - if domain == "Tabular Data Problem": - if st.button("Computer Vision Task"): - domain = "Computer Vision Task" - check = 1 - if st.button("Natural Language Processing Problem"): - domain = "Natural Language Processing Problem" - check = 1 - if st.button("Multimodal AI Model"): - domain = "Multimodal AI Model" - check = 1 - if st.button("Audio Operations Problem"): - domain = "Audio Operations Problem" - check = 1 - # if st.button("Tabular Data Task"): - # domain = "Tabular Data Task" - if st.button("Reinforcement Learning Problem"): - domain = "Reinforcement Learning Problem" - check = 1 - if st.button("Time Series Forecasting Problem"): - domain = "Time Series Forecasting Problem" - check = 1 - - - if domain == "Computer Vision Task": - # if st.button("Computer Vision Task"): - # domain = "Computer Vision Task" - if st.button("Natural Language Processing Problem"): - domain = "Natural Language Processing Problem" - check = 1 - - if st.button("Multimodal AI Model"): - domain = "Multimodal AI Model" - check = 1 - - if st.button("Audio Operations Problem"): - domain = "Audio Operations Problem" - check = 1 - if st.button("Tabular Data Task"): - domain = "Tabular Data Task" - check = 1 - if st.button("Reinforcement Learning Problem"): - domain = "Reinforcement Learning Problem" - check = 1 - if st.button("Time Series Forecasting Problem"): - domain = "Time Series Forecasting Problem" - check = 1 - - - if domain == "Natural Language Processing Problem": - if st.button("Computer Vision Task"): - domain = "Computer Vision Task" - check = 1 - # if st.button("Natural Language Processing Problem"): - # domain = "Natural Language Processing Problem" - if st.button("Multimodal AI Model"): - domain = "multimodal" - check = 1 - if st.button("Audio Operations Problem"): - domain = "Audio Operations Problem" - check = 1 - if st.button("Tabular Data Task"): - domain = "Tabular Data Task" - check = 1 - if st.button("Reinforcement Learning Problem"): - domain = "Reinforcement Learning Problem" - check = 1 - if st.button("Time Series Forecasting Problem"): - domain = "Time Series Forecasting Problem" - check = 1 - - - if domain == "Multimodal AI Model": - if st.button("Computer Vision Task"): - domain = "Computer Vision Task" - check = 1 - if st.button("Natural Language Processing Problem"): - domain = "Natural Language Processing Problem" - check = 1 - # if st.button("Multimodal AI Model"): - # domain = "Multimodal AI Model" - if st.button("Audio Operations Problem"): - domain = "Audio Operations Problem" - check = 1 - if st.button("Tabular Data Task"): - domain = "Tabular Data Task" - check = 1 - if st.button("Reinforcement Learning Problem"): - domain = "Reinforcement Learning Problem" - check = 1 - if st.button("Time Series Forecasting Problem"): - domain = "Time Series Forecasting Problem" - check = 1 - - - if domain == "audio": - if st.button("Computer Vision Task"): - domain = "Computer Vision Task" - check = 1 - if st.button("Natural Language Processing Problem"): - domain = "Natural Language Processing Problem" - check = 1 - if st.button("Multimodal AI Model"): - domain = "Multimodal AI Model" - check = 1 - # if st.button("Audio Operations Problem"): - # domain = "Audio Operations Problem" - if st.button("Tabular Data Task"): - domain = "Tabular Data Task" - check = 1 - if 
st.button("Reinforcement Learning Problem"): - domain = "Reinforcement Learning Problem" - check = 1 - if st.button("Time Series Forecasting Problem"): - domain = "Time Series Forecasting Problem" - check = 1 - - - if domain == "reinforcement-learning": - if st.button("Computer Vision Task"): - domain = "Computer Vision Task" - check = 1 - if st.button("Natural Language Processing Problem"): - domain = "Natural Language Processing Problem" - check = 1 - if st.button("Multimodal AI Model"): - domain = "multimodal" - check = 1 - if st.button("Audio Operations Problem"): - domain = "Audio Operations Problem" - check = 1 - if st.button("Tabular Data Task"): - domain = "Tabular Data Task" - check = 1 - # if st.button("Reinforcement Learning Problem"): - # domain = "Reinforcement Learning Problem" - if st.button("Time Series Forecasting Problem"): - domain = "Time Series Forecasting Problem" - check = 1 - - if domain == "Time Series Forecasting": - if st.button("Computer Vision Task"): - domain = "Computer Vision Task" - check = 1 - if st.button("Natural Language Processing Problem"): - domain = "Natural Language Processing Problem" - check = 1 - if st.button("Multimodal AI Model"): - domain = "Multimodal AI Model" - check = 1 - if st.button("Audio Operations Problem"): - domain = "Audio Operations Problem" - check = 1 - if st.button("Tabular Data Task"): - domain = "Tabular Data Task" - check = 1 - if st.button("Reinforcement Learning Problem"): - domain = "Reinforcement Learning Problem" - check = 1 - # if st.button("Time Series Forecasting Problem"): - # domain = "Time Series Forecasting Problem" - - # st.write("Recommended Models for category: ",top_cats[0], " are:",top_models) - - # st.write("Recommended Task category: ",top_models[0]) - - - - knowledge_base_tasks = {"Computer Vision Task":['depth-estimation', 'image-classification', 'image-segmentation', - 'image-to-image', 'object-detection', 'video-classification', - 'unconditional-image-generation', 'zero-shot-image-classification'],"Natural Language Processing Problem":[ - 'conversational', 'fill-mask', 'question-answering', - 'sentence-similarity', 'summarization', 'table-question-answering', - 'text-classification', 'text-generation', 'token-classification', - 'translation', 'zero-shot-classification'],"Audio Operations Problem":["audio-classification","audio-to-audio","automatic-speech-recognition", - "text-to-speech"],"Tabular Data Task":["tabular-classification","tabular-regression"],"others":["document-question-answering", - "feature-extraction","image-to-text","text-to-image","text-to-video","visual-question-answering"], - "Reinforcement Learning Problem":["reinforcement-learning"],"time-series-forecasting":["time-series-forecasting"]} - - # st.write(check) - # st.write(domain) - if check == 1: - - category = classifier(prompt, knowledge_base_tasks[domain])["labels"][0] - - - st.markdown(f'
{"Recognized sub category in Domain: "+domain}
      ', unsafe_allow_html=True) - - st.markdown(f'
{category}
      ', unsafe_allow_html=True) - - - top_models = get_top_3(category) - #st.write(top_models) - st.markdown(f'
{"The best models selected for this domain:"}
      ', unsafe_allow_html=True) - - - st.markdown(f'
{"1- "+top_models[0]}
      ', unsafe_allow_html=True) - - st.image("./buttons1.png") - - # if st.button("Show more"): - - st.markdown(f'
{"2- "+top_models[1]}
      ', unsafe_allow_html=True) - st.image("./buttons1.png") - - - st.markdown(f'
{"3- "+top_models[2]}
      ', unsafe_allow_html=True) - st.image("./buttons1.png") - - - - - - - - - - -page_names_to_funcs = { - "Pick the best Model for your AI app":task_categorization_model_predictions, - "Compare Object Detection Performance": object_detection, - "Compare Document Summarization Performance": text_summarization -} - -demo_name = st.sidebar.selectbox("Pick the best model for your next AI task or compare model performance if to advance your builds", page_names_to_funcs.keys()) -page_names_to_funcs[demo_name]() - - - -# st.write("Recommended Most Popular Model for category ",top_cat, " is:",top_models[0]) -# if st.button("Show more"): -# for i in range(1,len(top_models)): -# st.write("Model#",str(i+1),top_models[i]) - - -# data = prompt - -# # print("before len data") - -# if len(data) != 0: -# # print("after len data") -# st.write("Recommended Task category: ",top_cats[0]) -# st.write("Recommended Most Popular Model for category ",top_cats[0], " is:",top_models[0]) -# if st.button("Show more"): -# for i in range(1,len(top_models)): -# st.write("Model#",str(i+1),top_models[i]) - -# st.write("Upload your file: ") -# uploaded_files = "" -# uploaded_files = st.file_uploader("Choose a text file", accept_multiple_files=True) -# if st.button("Done"): -# global file_data -# st.write("filename:", uploaded_files) -# for uploaded_file in uploaded_files: -# # print("here") -# file_data = open(uploaded_file.name,encoding="utf8").read() -# st.write("filename:", uploaded_file.name) -# # st.write(file_data[:500]) -# # print("before summarizer") -# print(file_data[:500]) -# analysis = summarizer(models = top_models, data = file_data[:500]) -# # print("between summarizer analysis") - -# z,x,c,v,b = best_model(analysis,file_data[:500]) -# st.write("Best model for Task: ",z) -# st.write("\nBest model name: ",x) -# st.write("\nBest model Score: ",c) -# st.write("\nOriginal Data first 500 characters: ", v) -# st.write("\nBest Model Result: ",b) -# st.success(result) diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/optimized_txt2img.py b/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/optimized_txt2img.py deleted file mode 100644 index 5022cac9f59183556811526400d585d8e82831d4..0000000000000000000000000000000000000000 --- a/spaces/awaawawawa/iurf7irfuyytruyyugb/optimizedSD/optimized_txt2img.py +++ /dev/null @@ -1,347 +0,0 @@ -import argparse, os, re -import torch -import numpy as np -from random import randint -from omegaconf import OmegaConf -from PIL import Image -from tqdm import tqdm, trange -from itertools import islice -from einops import rearrange -from torchvision.utils import make_grid -import time -from pytorch_lightning import seed_everything -from torch import autocast -from contextlib import contextmanager, nullcontext -from ldmlib.util import instantiate_from_config -from optimUtils import split_weighted_subprompts, logger -from transformers import logging -# from samplers import CompVisDenoiser -logging.set_verbosity_error() - - -def chunk(it, size): - it = iter(it) - return iter(lambda: tuple(islice(it, size)), ()) - - -def load_model_from_config(ckpt, verbose=False): - print(f"Loading model from {ckpt}") - pl_sd = torch.load(ckpt, map_location="cpu") - if "global_step" in pl_sd: - print(f"Global Step: {pl_sd['global_step']}") - sd = pl_sd["state_dict"] - return sd - - -config = "optimizedSD/v1-inference.yaml" -DEFAULT_CKPT = "models/ldm/stable-diffusion-v1/model.ckpt" - -parser = argparse.ArgumentParser() - -parser.add_argument( - "--prompt", type=str, nargs="?", default="a painting 
of a virus monster playing guitar", help="the prompt to render" -) -parser.add_argument("--outdir", type=str, nargs="?", help="dir to write results to", default="outputs/txt2img-samples") -parser.add_argument( - "--skip_grid", - action="store_true", - help="do not save a grid, only individual samples. Helpful when evaluating lots of samples", -) -parser.add_argument( - "--skip_save", - action="store_true", - help="do not save individual samples. For speed measurements.", -) -parser.add_argument( - "--ddim_steps", - type=int, - default=50, - help="number of ddim sampling steps", -) - -parser.add_argument( - "--fixed_code", - action="store_true", - help="if enabled, uses the same starting code across samples ", -) -parser.add_argument( - "--ddim_eta", - type=float, - default=0.0, - help="ddim eta (eta=0.0 corresponds to deterministic sampling", -) -parser.add_argument( - "--n_iter", - type=int, - default=1, - help="sample this often", -) -parser.add_argument( - "--H", - type=int, - default=512, - help="image height, in pixel space", -) -parser.add_argument( - "--W", - type=int, - default=512, - help="image width, in pixel space", -) -parser.add_argument( - "--C", - type=int, - default=4, - help="latent channels", -) -parser.add_argument( - "--f", - type=int, - default=8, - help="downsampling factor", -) -parser.add_argument( - "--n_samples", - type=int, - default=5, - help="how many samples to produce for each given prompt. A.k.a. batch size", -) -parser.add_argument( - "--n_rows", - type=int, - default=0, - help="rows in the grid (default: n_samples)", -) -parser.add_argument( - "--scale", - type=float, - default=7.5, - help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))", -) -parser.add_argument( - "--device", - type=str, - default="cuda", - help="specify GPU (cuda/cuda:0/cuda:1/...)", -) -parser.add_argument( - "--from-file", - type=str, - help="if specified, load prompts from this file", -) -parser.add_argument( - "--seed", - type=int, - default=None, - help="the seed (for reproducible sampling)", -) -parser.add_argument( - "--unet_bs", - type=int, - default=1, - help="Slightly reduces inference time at the expense of high VRAM (value > 1 not recommended )", -) -parser.add_argument( - "--turbo", - action="store_true", - help="Reduces inference time on the expense of 1GB VRAM", -) -parser.add_argument( - "--precision", - type=str, - help="evaluate at this precision", - choices=["full", "autocast"], - default="autocast" -) -parser.add_argument( - "--format", - type=str, - help="output image format", - choices=["jpg", "png"], - default="png", -) -parser.add_argument( - "--sampler", - type=str, - help="sampler", - choices=["ddim", "plms","heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms"], - default="plms", -) -parser.add_argument( - "--ckpt", - type=str, - help="path to checkpoint of model", - default=DEFAULT_CKPT, -) -opt = parser.parse_args() - -tic = time.time() -os.makedirs(opt.outdir, exist_ok=True) -outpath = opt.outdir -grid_count = len(os.listdir(outpath)) - 1 - -if opt.seed == None: - opt.seed = randint(0, 1000000) -seed_everything(opt.seed) - -# Logging -logger(vars(opt), log_csv = "logs/txt2img_logs.csv") - -sd = load_model_from_config(f"{opt.ckpt}") -li, lo = [], [] -for key, value in sd.items(): - sp = key.split(".") - if (sp[0]) == "model": - if "input_blocks" in sp: - li.append(key) - elif "middle_block" in sp: - li.append(key) - elif "time_embed" in sp: - li.append(key) - else: - lo.append(key) -for key in li: - sd["model1." 
+ key[6:]] = sd.pop(key) -for key in lo: - sd["model2." + key[6:]] = sd.pop(key) - -config = OmegaConf.load(f"{config}") - -model = instantiate_from_config(config.modelUNet) -_, _ = model.load_state_dict(sd, strict=False) -model.eval() -model.unet_bs = opt.unet_bs -model.cdevice = opt.device -model.turbo = opt.turbo - -modelCS = instantiate_from_config(config.modelCondStage) -_, _ = modelCS.load_state_dict(sd, strict=False) -modelCS.eval() -modelCS.cond_stage_model.device = opt.device - -modelFS = instantiate_from_config(config.modelFirstStage) -_, _ = modelFS.load_state_dict(sd, strict=False) -modelFS.eval() -del sd - -if opt.device != "cpu" and opt.precision == "autocast": - model.half() - modelCS.half() - -start_code = None -if opt.fixed_code: - start_code = torch.randn([opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=opt.device) - - -batch_size = opt.n_samples -n_rows = opt.n_rows if opt.n_rows > 0 else batch_size -if not opt.from_file: - assert opt.prompt is not None - prompt = opt.prompt - print(f"Using prompt: {prompt}") - data = [batch_size * [prompt]] - -else: - print(f"reading prompts from {opt.from_file}") - with open(opt.from_file, "r") as f: - text = f.read() - print(f"Using prompt: {text.strip()}") - data = text.splitlines() - data = batch_size * list(data) - data = list(chunk(sorted(data), batch_size)) - - -if opt.precision == "autocast" and opt.device != "cpu": - precision_scope = autocast -else: - precision_scope = nullcontext - -seeds = "" -with torch.no_grad(): - - all_samples = list() - for n in trange(opt.n_iter, desc="Sampling"): - for prompts in tqdm(data, desc="data"): - - sample_path = os.path.join(outpath, "_".join(re.split(":| ", prompts[0])))[:150] - os.makedirs(sample_path, exist_ok=True) - base_count = len(os.listdir(sample_path)) - - with precision_scope("cuda"): - modelCS.to(opt.device) - uc = None - if opt.scale != 1.0: - uc = modelCS.get_learned_conditioning(batch_size * [""]) - if isinstance(prompts, tuple): - prompts = list(prompts) - - subprompts, weights = split_weighted_subprompts(prompts[0]) - if len(subprompts) > 1: - c = torch.zeros_like(uc) - totalWeight = sum(weights) - # normalize each "sub prompt" and add it - for i in range(len(subprompts)): - weight = weights[i] - # if not skip_normalize: - weight = weight / totalWeight - c = torch.add(c, modelCS.get_learned_conditioning(subprompts[i]), alpha=weight) - else: - c = modelCS.get_learned_conditioning(prompts) - - shape = [opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f] - - if opt.device != "cpu": - mem = torch.cuda.memory_allocated() / 1e6 - modelCS.to("cpu") - while torch.cuda.memory_allocated() / 1e6 >= mem: - time.sleep(1) - - samples_ddim = model.sample( - S=opt.ddim_steps, - conditioning=c, - seed=opt.seed, - shape=shape, - verbose=False, - unconditional_guidance_scale=opt.scale, - unconditional_conditioning=uc, - eta=opt.ddim_eta, - x_T=start_code, - sampler = opt.sampler, - ) - - modelFS.to(opt.device) - - print(samples_ddim.shape) - print("saving images") - for i in range(batch_size): - - x_samples_ddim = modelFS.decode_first_stage(samples_ddim[i].unsqueeze(0)) - x_sample = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) - x_sample = 255.0 * rearrange(x_sample[0].cpu().numpy(), "c h w -> h w c") - Image.fromarray(x_sample.astype(np.uint8)).save( - os.path.join(sample_path, "seed_" + str(opt.seed) + "_" + f"{base_count:05}.{opt.format}") - ) - seeds += str(opt.seed) + "," - opt.seed += 1 - base_count += 1 - - if opt.device != "cpu": - mem = 
torch.cuda.memory_allocated() / 1e6 - modelFS.to("cpu") - while torch.cuda.memory_allocated() / 1e6 >= mem: - time.sleep(1) - del samples_ddim - print("memory_final = ", torch.cuda.memory_allocated() / 1e6) - -toc = time.time() - -time_taken = (toc - tic) / 60.0 - -print( - ( - "Samples finished in {0:.2f} minutes and exported to " - + sample_path - + "\n Seeds used = " - + seeds[:-1] - ).format(time_taken) -) diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/sd_internal/__init__.py b/spaces/awaawawawa/iurf7irfuyytruyyugb/sd_internal/__init__.py deleted file mode 100644 index f2a9901f080ef6a837dccaee1548ccf9b661dba3..0000000000000000000000000000000000000000 --- a/spaces/awaawawawa/iurf7irfuyytruyyugb/sd_internal/__init__.py +++ /dev/null @@ -1,107 +0,0 @@ -import json - -class Request: - session_id: str = "session" - prompt: str = "" - negative_prompt: str = "" - init_image: str = None # base64 - mask: str = None # base64 - num_outputs: int = 1 - num_inference_steps: int = 50 - guidance_scale: float = 7.5 - width: int = 512 - height: int = 512 - seed: int = 42 - prompt_strength: float = 0.8 - sampler: str = None # "ddim", "plms", "heun", "euler", "euler_a", "dpm2", "dpm2_a", "lms" - # allow_nsfw: bool = False - precision: str = "autocast" # or "full" - save_to_disk_path: str = None - turbo: bool = True - use_cpu: bool = False - use_full_precision: bool = False - use_face_correction: str = None # or "GFPGANv1.3" - use_upscale: str = None # or "RealESRGAN_x4plus" or "RealESRGAN_x4plus_anime_6B" - use_stable_diffusion_model: str = "sd-v1-4" - show_only_filtered_image: bool = False - output_format: str = "jpeg" # or "png" - - stream_progress_updates: bool = False - stream_image_progress: bool = False - - def json(self): - return { - "session_id": self.session_id, - "prompt": self.prompt, - "negative_prompt": self.negative_prompt, - "num_outputs": self.num_outputs, - "num_inference_steps": self.num_inference_steps, - "guidance_scale": self.guidance_scale, - "width": self.width, - "height": self.height, - "seed": self.seed, - "prompt_strength": self.prompt_strength, - "sampler": self.sampler, - "use_face_correction": self.use_face_correction, - "use_upscale": self.use_upscale, - "use_stable_diffusion_model": self.use_stable_diffusion_model, - "output_format": self.output_format, - } - - def to_string(self): - return f''' - session_id: {self.session_id} - prompt: {self.prompt} - negative_prompt: {self.negative_prompt} - seed: {self.seed} - num_inference_steps: {self.num_inference_steps} - sampler: {self.sampler} - guidance_scale: {self.guidance_scale} - w: {self.width} - h: {self.height} - precision: {self.precision} - save_to_disk_path: {self.save_to_disk_path} - turbo: {self.turbo} - use_cpu: {self.use_cpu} - use_full_precision: {self.use_full_precision} - use_face_correction: {self.use_face_correction} - use_upscale: {self.use_upscale} - use_stable_diffusion_model: {self.use_stable_diffusion_model} - show_only_filtered_image: {self.show_only_filtered_image} - output_format: {self.output_format} - - stream_progress_updates: {self.stream_progress_updates} - stream_image_progress: {self.stream_image_progress}''' - -class Image: - data: str # base64 - seed: int - is_nsfw: bool - path_abs: str = None - - def __init__(self, data, seed): - self.data = data - self.seed = seed - - def json(self): - return { - "data": self.data, - "seed": self.seed, - "path_abs": self.path_abs, - } - -class Response: - request: Request - images: list - - def json(self): - res = { - "status": 'succeeded', - "request": 
self.request.json(), - "output": [], - } - - for image in self.images: - res["output"].append(image.json()) - - return res diff --git a/spaces/awacke1/Flan-Upvote-Downvote-Human-Feedback/app.py b/spaces/awacke1/Flan-Upvote-Downvote-Human-Feedback/app.py deleted file mode 100644 index 30185ee08762c0396d36597ed525ba30149f03a4..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Flan-Upvote-Downvote-Human-Feedback/app.py +++ /dev/null @@ -1,77 +0,0 @@ - -import os -import asyncio -from concurrent.futures import ThreadPoolExecutor -import requests -import gradio as gr - - -examples = [ - ["Please answer to the following question. Who is going to be the next Ballon d'or?"], - ["Q: Can Barack Obama have a conversation with George Washington? Give the rationale before answering."], - ["Summarize the following text: Peter and Elizabeth took a taxi to attend the night party in the city. While in the party, Elizabeth collapsed and was rushed to the hospital. Since she was diagnosed with a brain injury, the doctor told Peter to stay besides her until she gets well. Therefore, Peter stayed with her at the hospital for 3 days without leaving."], - ["Please answer the following question: What is the boiling point of water?"], - ["Answer the following question by detailing your reasoning: Are Pokemons alive?"], - ["Translate to German: How old are you?"], - ["Generate a cooking recipe to make bolognese pasta:"], - ["Answer the following yes/no question by reasoning step-by-step. Can you write a whole Haiku in a single tweet?"], - ["Premise: At my age you will probably have learnt one lesson. Hypothesis: It's not certain how many lessons you'll learn by your thirties. Does the premise entail the hypothesis?"], - ["Answer the following question by reasoning step by step. The cafeteria had 23 apples. If they used 20 for lunch and bought 6 more, how many apples do they have?"], - ["""Q: Roger has 5 tennis balls. He buys 2 more cans of tennis balls. Each can has 3 tennis balls. How many tennis balls does he have now? -A: Roger started with 5 balls. 2 cans of 3 tennis balls each is 6 tennis balls. 5 + 6 = 11. The answer is 11. -Q: A juggler can juggle 16 balls. Half of the balls are golf balls, and half of the golf balls are blue. How many blue golf balls are there?"""], -] -title = "Flan UL2 vs Flan T5 XXL" -description = "This demo compares [Flan-T5-xxl](https://huggingface.co/google/flan-t5-xxl) and [Flan-UL2](https://huggingface.co/google/flan-ul2). Learn more about these models in their model card!" 
- - - -MAX_NEW_TOKENS = 256 -TOKEN = os.environ.get("API_TOKEN", None) -URLS = [ - "https://api-inference.huggingface.co/models/google/flan-ul2", - "https://api-inference.huggingface.co/models/google/flan-t5-xxl", -] - - -def fetch(session, text, api_url): - model = api_url.split("/")[-1] - response = session.post(api_url, json={"inputs": text, "parameters": {"max_new_tokens": MAX_NEW_TOKENS}}) - if response.status_code != 200: - return model, None - return model, response.json() - - - -async def inference(text): - with ThreadPoolExecutor(max_workers=2) as executor: - with requests.Session() as session: - session.headers = {"Authorization": f"Bearer {TOKEN}"} - # Initialize the event loop - loop = asyncio.get_event_loop() - tasks = [ - loop.run_in_executor( - executor, fetch, *(session, text, url) # Allows us to pass in multiple arguments to `fetch` - ) - for url in URLS - ] - - # Initializes the tasks to run and awaits their results - responses = [None, None] - for (model, response) in await asyncio.gather(*tasks): - if response is not None: - if model == "flan-ul2": - responses[0] = response[0]["generated_text"] - elif model == "flan-t5-xxl": - responses[1] = response[0]["generated_text"] - return responses - - -io = gr.Interface( - inference, - gr.Textbox(lines=3), - outputs=[gr.Textbox(lines=3, label="Flan T5-UL2"), gr.Textbox(lines=3, label="Flan T5-XXL")], - description=description, - examples=examples, -) -io.launch() \ No newline at end of file diff --git a/spaces/awacke1/HTML5-Aframe-Lsystems/style.css b/spaces/awacke1/HTML5-Aframe-Lsystems/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/awacke1/HTML5-Aframe-Lsystems/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/awacke1/NSFW_text_classifier/app.py b/spaces/awacke1/NSFW_text_classifier/app.py deleted file mode 100644 index 549bc3c792207511958f3c0290d1f5440b5adb4f..0000000000000000000000000000000000000000 --- a/spaces/awacke1/NSFW_text_classifier/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/michellejieli/NSFW_text_classifier").launch() \ No newline at end of file diff --git a/spaces/awacke1/Spinning.Model-1-10/README.md b/spaces/awacke1/Spinning.Model-1-10/README.md deleted file mode 100644 index 915030822758f3e50982108a4b0cb845bc954f7d..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Spinning.Model-1-10/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Spinning.Model 1 10 -emoji: 📊 -colorFrom: indigo -colorTo: red -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/aymm/Task-Exploration-Hate-Speech/posts/conclusion.py b/spaces/aymm/Task-Exploration-Hate-Speech/posts/conclusion.py deleted file mode 100644 index 75b2fab5572b23de0bc101c1e4ecacbf4f4b9f99..0000000000000000000000000000000000000000 --- a/spaces/aymm/Task-Exploration-Hate-Speech/posts/conclusion.py +++ /dev/null @@ -1,18 +0,0 @@ -import streamlit as 
st - -title = "Key Takeaways" -description = "The key takeaways from this exploration" -date = "2022-01-26" -thumbnail = "images/huggingface_logo.png" - -def run_article(): - st.markdown(""" - # Conclusion - - Here are some of the main ideas we have conveyed in this exploration: - - Defining hate speech is hard and changes depending on your context and goals. - - Capturing a snapshot of what you've defined to be hate speech in a dataset is hard. - - Models learn lots of different things based on the data it sees, and that can include things you didn't intend for them to learn. - - Action items? - """) diff --git a/spaces/banana-projects/web3d/node_modules/three/src/math/interpolants/QuaternionLinearInterpolant.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/math/interpolants/QuaternionLinearInterpolant.d.ts deleted file mode 100644 index 4b9f19601f1ae2e9ab0b79bad35db0839656dfc1..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/math/interpolants/QuaternionLinearInterpolant.d.ts +++ /dev/null @@ -1,12 +0,0 @@ -import { Interpolant } from '../Interpolant'; - -export class QuaternionLinearInterpolant extends Interpolant { - constructor( - parameterPositions: any, - samplesValues: any, - sampleSize: number, - resultBuffer?: any - ); - - interpolate_(i1: number, t0: number, t: number, t1: number): any; -} diff --git a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/lights_lambert_vertex.glsl.js b/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/lights_lambert_vertex.glsl.js deleted file mode 100644 index e503a878d7310b0404e5902b74cfce45ccbcca10..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/renderers/shaders/ShaderChunk/lights_lambert_vertex.glsl.js +++ /dev/null @@ -1,119 +0,0 @@ -export default /* glsl */` -vec3 diffuse = vec3( 1.0 ); - -GeometricContext geometry; -geometry.position = mvPosition.xyz; -geometry.normal = normalize( transformedNormal ); -geometry.viewDir = normalize( -mvPosition.xyz ); - -GeometricContext backGeometry; -backGeometry.position = geometry.position; -backGeometry.normal = -geometry.normal; -backGeometry.viewDir = geometry.viewDir; - -vLightFront = vec3( 0.0 ); -vIndirectFront = vec3( 0.0 ); - -#ifdef DOUBLE_SIDED - vLightBack = vec3( 0.0 ); - vIndirectBack = vec3( 0.0 ); -#endif - -IncidentLight directLight; -float dotNL; -vec3 directLightColor_Diffuse; - -#if NUM_POINT_LIGHTS > 0 - - #pragma unroll_loop - for ( int i = 0; i < NUM_POINT_LIGHTS; i ++ ) { - - getPointDirectLightIrradiance( pointLights[ i ], geometry, directLight ); - - dotNL = dot( geometry.normal, directLight.direction ); - directLightColor_Diffuse = PI * directLight.color; - - vLightFront += saturate( dotNL ) * directLightColor_Diffuse; - - #ifdef DOUBLE_SIDED - - vLightBack += saturate( -dotNL ) * directLightColor_Diffuse; - - #endif - - } - -#endif - -#if NUM_SPOT_LIGHTS > 0 - - #pragma unroll_loop - for ( int i = 0; i < NUM_SPOT_LIGHTS; i ++ ) { - - getSpotDirectLightIrradiance( spotLights[ i ], geometry, directLight ); - - dotNL = dot( geometry.normal, directLight.direction ); - directLightColor_Diffuse = PI * directLight.color; - - vLightFront += saturate( dotNL ) * directLightColor_Diffuse; - - #ifdef DOUBLE_SIDED - - vLightBack += saturate( -dotNL ) * directLightColor_Diffuse; - - #endif - } - -#endif - -/* -#if NUM_RECT_AREA_LIGHTS > 0 - - for ( int i = 0; i < NUM_RECT_AREA_LIGHTS; i ++ ) { - - // TODO 
(abelnation): implement - - } - -#endif -*/ - -#if NUM_DIR_LIGHTS > 0 - - #pragma unroll_loop - for ( int i = 0; i < NUM_DIR_LIGHTS; i ++ ) { - - getDirectionalDirectLightIrradiance( directionalLights[ i ], geometry, directLight ); - - dotNL = dot( geometry.normal, directLight.direction ); - directLightColor_Diffuse = PI * directLight.color; - - vLightFront += saturate( dotNL ) * directLightColor_Diffuse; - - #ifdef DOUBLE_SIDED - - vLightBack += saturate( -dotNL ) * directLightColor_Diffuse; - - #endif - - } - -#endif - -#if NUM_HEMI_LIGHTS > 0 - - #pragma unroll_loop - for ( int i = 0; i < NUM_HEMI_LIGHTS; i ++ ) { - - vIndirectFront += getHemisphereLightIrradiance( hemisphereLights[ i ], geometry ); - - #ifdef DOUBLE_SIDED - - vIndirectBack += getHemisphereLightIrradiance( hemisphereLights[ i ], backGeometry ); - - #endif - - } - -#endif -`; diff --git a/spaces/bankholdup/stylegan_petbreeder/e4e/metrics/LEC.py b/spaces/bankholdup/stylegan_petbreeder/e4e/metrics/LEC.py deleted file mode 100644 index 3eef2d2f00a4d757a56b6e845a8fde16aab306ab..0000000000000000000000000000000000000000 --- a/spaces/bankholdup/stylegan_petbreeder/e4e/metrics/LEC.py +++ /dev/null @@ -1,134 +0,0 @@ -import sys -import argparse -import torch -import numpy as np -from torch.utils.data import DataLoader - -sys.path.append(".") -sys.path.append("..") - -from configs import data_configs -from datasets.images_dataset import ImagesDataset -from utils.model_utils import setup_model - - -class LEC: - def __init__(self, net, is_cars=False): - """ - Latent Editing Consistency metric as proposed in the main paper. - :param net: e4e model loaded over the pSp framework. - :param is_cars: An indication as to whether or not to crop the middle of the StyleGAN's output images. - """ - self.net = net - self.is_cars = is_cars - - def _encode(self, images): - """ - Encodes the given images into StyleGAN's latent space. - :param images: Tensor of shape NxCxHxW representing the images to be encoded. - :return: Tensor of shape NxKx512 representing the latent space embeddings of the given image (in W(K, *) space). - """ - codes = self.net.encoder(images) - assert codes.ndim == 3, f"Invalid latent codes shape, should be NxKx512 but is {codes.shape}" - # normalize with respect to the center of an average face - if self.net.opts.start_from_latent_avg: - codes = codes + self.net.latent_avg.repeat(codes.shape[0], 1, 1) - return codes - - def _generate(self, codes): - """ - Generate the StyleGAN2 images of the given codes - :param codes: Tensor of shape NxKx512 representing the StyleGAN's latent codes (in W(K, *) space). - :return: Tensor of shape NxCxHxW representing the generated images. - """ - images, _ = self.net.decoder([codes], input_is_latent=True, randomize_noise=False, return_latents=True) - images = self.net.face_pool(images) - if self.is_cars: - images = images[:, :, 32:224, :] - return images - - @staticmethod - def _filter_outliers(arr): - arr = np.array(arr) - - lo = np.percentile(arr, 1, interpolation="lower") - hi = np.percentile(arr, 99, interpolation="higher") - return np.extract( - np.logical_and(lo <= arr, arr <= hi), arr - ) - - def calculate_metric(self, data_loader, edit_function, inverse_edit_function): - """ - Calculate the LEC metric score. - :param data_loader: An iterable that returns a tuple of (images, _), similar to the training data loader. - :param edit_function: A function that receives latent codes and performs a semantically meaningful edit in the - latent space. 
- :param inverse_edit_function: A function that receives latent codes and performs the inverse edit of the - `edit_function` parameter. - :return: The LEC metric score. - """ - distances = [] - with torch.no_grad(): - for batch in data_loader: - x, _ = batch - inputs = x.to(device).float() - - codes = self._encode(inputs) - edited_codes = edit_function(codes) - edited_image = self._generate(edited_codes) - edited_image_inversion_codes = self._encode(edited_image) - inverse_edit_codes = inverse_edit_function(edited_image_inversion_codes) - - dist = (codes - inverse_edit_codes).norm(2, dim=(1, 2)).mean() - distances.append(dist.to("cpu").numpy()) - - distances = self._filter_outliers(distances) - return distances.mean() - - -if __name__ == "__main__": - device = "cuda" - - parser = argparse.ArgumentParser(description="LEC metric calculator") - - parser.add_argument("--batch", type=int, default=8, help="batch size for the models") - parser.add_argument("--images_dir", type=str, default=None, - help="Path to the images directory on which we calculate the LEC score") - parser.add_argument("ckpt", metavar="CHECKPOINT", help="path to the model checkpoints") - - args = parser.parse_args() - print(args) - - net, opts = setup_model(args.ckpt, device) - dataset_args = data_configs.DATASETS[opts.dataset_type] - transforms_dict = dataset_args['transforms'](opts).get_transforms() - - images_directory = dataset_args['test_source_root'] if args.images_dir is None else args.images_dir - test_dataset = ImagesDataset(source_root=images_directory, - target_root=images_directory, - source_transform=transforms_dict['transform_source'], - target_transform=transforms_dict['transform_test'], - opts=opts) - - data_loader = DataLoader(test_dataset, - batch_size=args.batch, - shuffle=False, - num_workers=2, - drop_last=True) - - print(f'dataset length: {len(test_dataset)}') - - # In the following example, we are using an InterfaceGAN based editing to calculate the LEC metric. - # Change the provided example according to your domain and needs. - direction = torch.load('../editings/interfacegan_directions/age.pt').to(device) - - def edit_func_example(codes): - return codes + 3 * direction - - - def inverse_edit_func_example(codes): - return codes - 3 * direction - - lec = LEC(net, is_cars='car' in opts.dataset_type) - result = lec.calculate_metric(data_loader, edit_func_example, inverse_edit_func_example) - print(f"LEC: {result}") diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/inference_gfpgan_20220326225636.py b/spaces/beihai/GFPGAN-V1.3-whole-image/.history/inference_gfpgan_20220326225636.py deleted file mode 100644 index ac814a50ead170ab69f32c1714bd445c6c5baf17..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/.history/inference_gfpgan_20220326225636.py +++ /dev/null @@ -1,116 +0,0 @@ -import argparse -import cv2 -import glob -import numpy as np -import os -import torch -from basicsr.utils import imwrite - -from gfpgan import GFPGANer - - -def main(): - """Inference demo for GFPGAN. - """ - parser = argparse.ArgumentParser() - parser.add_argument('--upscale', type=int, default=2, help='The final upsampling scale of the image') - parser.add_argument('--arch', type=str, default='clean', help='The GFPGAN architecture. 
Option: clean | original') - parser.add_argument('--channel', type=int, default=2, help='Channel multiplier for large networks of StyleGAN2') - parser.add_argument('--model_path', type=str, default='experiments/pretrained_models/GFPGANv1.3.pth') - parser.add_argument('--bg_upsampler', type=str, default='realesrgan', help='background upsampler') - parser.add_argument( - '--bg_tile', type=int, default=400, help='Tile size for background sampler, 0 for no tile during testing') - parser.add_argument('--test_path', type=str, default='inputs/whole_imgs', help='Input folder') - parser.add_argument('--suffix', type=str, default=None, help='Suffix of the restored faces') - parser.add_argument('--only_center_face', action='store_true', help='Only restore the center face') - parser.add_argument('--aligned', action='store_true', help='Input are aligned faces') - parser.add_argument('--paste_back', action='store_false', help='Paste the restored faces back to images') - parser.add_argument('--save_root', type=str, default='results', help='Path to save root') - parser.add_argument( - '--ext', - type=str, - default='auto', - help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs') - args = parser.parse_args() - if args.test_path.endswith('/'): - args.test_path = args.test_path[:-1] - os.makedirs(args.save_root, exist_ok=True) - - # background upsampler - if args.bg_upsampler == 'realesrgan': - if not torch.cuda.is_available(): # CPU - import warnings - warnings.warn('The unoptimized RealESRGAN is very slow on CPU. We do not use it. ' - 'If you really want to use it, please modify the corresponding codes.') - bg_upsampler = None - else: - from basicsr.archs.rrdbnet_arch import RRDBNet - from realesrgan import RealESRGANer - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2) - bg_upsampler = RealESRGANer( - scale=2, - model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth', - model=model, - tile=args.bg_tile, - tile_pad=10, - pre_pad=0, - half=True) # need to set False in CPU mode - else: - bg_upsampler = None - # set up GFPGAN restorer - restorer = GFPGANer( - model_path=args.model_path, - upscale=args.upscale, - arch=args.arch, - channel_multiplier=args.channel, - bg_upsampler=bg_upsampler) - - img_list = sorted(glob.glob(os.path.join(args.test_path, '*'))) - for img_path in img_list: - # read image - img_name = os.path.basename(img_path) - print(f'Processing {img_name} ...') - basename, ext = os.path.splitext(img_name) - input_img = cv2.imread(img_path, cv2.IMREAD_COLOR) - - # restore faces and background if necessary - cropped_faces, restored_faces, restored_img = restorer.enhance( - input_img, has_aligned=args.aligned, only_center_face=args.only_center_face, paste_back=args.paste_back) - - # save faces - for idx, (cropped_face, restored_face) in enumerate(zip(cropped_faces, restored_faces)): - # save cropped face - save_crop_path = os.path.join(args.save_root, 'cropped_faces', f'{basename}_{idx:02d}.png') - imwrite(cropped_face, save_crop_path) - # save restored face - if args.suffix is not None: - save_face_name = f'{basename}_{idx:02d}_{args.suffix}.png' - else: - save_face_name = f'{basename}_{idx:02d}.png' - save_restore_path = os.path.join(args.save_root, 'restored_faces', save_face_name) - imwrite(restored_face, save_restore_path) - # save comparison image - cmp_img = np.concatenate((cropped_face, restored_face), axis=1) - imwrite(cmp_img,
os.path.join(args.save_root, 'cmp', f'{basename}_{idx:02d}.png')) - - # save restored img - if restored_img is not None: - if args.ext == 'auto': - extension = ext[1:] - else: - extension = args.ext - - if args.suffix is not None: - save_restore_path = os.path.join(args.save_root, 'restored_imgs', - f'{basename}_{args.suffix}.{extension}') - else: - save_restore_path = os.path.join(args.save_root, 'restored_imgs', f'{basename}.{extension}') - imwrite(restored_img, save_restore_path) - - print(f'Results are in the [{args.save_root}] folder.') - - -if __name__ == '__main__': - main() diff --git a/spaces/beki/pii-anonymizer/app.py b/spaces/beki/pii-anonymizer/app.py deleted file mode 100644 index 20d29fec1557d098add26dd5fc9e39ea6ac9d784..0000000000000000000000000000000000000000 --- a/spaces/beki/pii-anonymizer/app.py +++ /dev/null @@ -1,212 +0,0 @@ - -"""Streamlit app for Presidio + Privy-trained PII models.""" - -import spacy -from spacy_recognizer import CustomSpacyRecognizer -from presidio_analyzer.nlp_engine import NlpEngineProvider -from presidio_anonymizer import AnonymizerEngine -from presidio_analyzer import AnalyzerEngine, RecognizerRegistry -import pandas as pd -from annotated_text import annotated_text -from json import JSONEncoder -import json -import warnings -import streamlit as st -import os -os.environ["TOKENIZERS_PARALLELISM"] = "false" -warnings.filterwarnings('ignore') -# from flair_recognizer import FlairRecognizer - -# Helper methods -@st.cache(allow_output_mutation=True) -def analyzer_engine(): - """Return AnalyzerEngine.""" - - spacy_recognizer = CustomSpacyRecognizer() - - configuration = { - "nlp_engine_name": "spacy", - "models": [ - {"lang_code": "en", "model_name": "en_spacy_pii_distilbert"}], - } - - # Create NLP engine based on configuration - provider = NlpEngineProvider(nlp_configuration=configuration) - nlp_engine = provider.create_engine() - - registry = RecognizerRegistry() - # add rule-based recognizers - registry.load_predefined_recognizers(nlp_engine=nlp_engine) - registry.add_recognizer(spacy_recognizer) - # remove the nlp engine we passed, to use custom label mappings - registry.remove_recognizer("SpacyRecognizer") - - analyzer = AnalyzerEngine(nlp_engine=nlp_engine, - registry=registry, supported_languages=["en"]) - - # uncomment for flair-based NLP recognizer - # flair_recognizer = FlairRecognizer() - # registry.load_predefined_recognizers() - # registry.add_recognizer(flair_recognizer) - # analyzer = AnalyzerEngine(registry=registry, supported_languages=["en"]) - return analyzer - - -@st.cache(allow_output_mutation=True) -def anonymizer_engine(): - """Return AnonymizerEngine.""" - return AnonymizerEngine() - - -def get_supported_entities(): - """Return supported entities from the Analyzer Engine.""" - return analyzer_engine().get_supported_entities() - - -def analyze(**kwargs): - """Analyze input using Analyzer engine and input arguments (kwargs).""" - if "entities" not in kwargs or "All" in kwargs["entities"]: - kwargs["entities"] = None - return analyzer_engine().analyze(**kwargs) - - -def anonymize(text, analyze_results): - """Anonymize identified input using Presidio Abonymizer.""" - if not text: - return - res = anonymizer_engine().anonymize(text, analyze_results) - return res.text - - -def annotate(text, st_analyze_results, st_entities): - tokens = [] - # sort by start index - results = sorted(st_analyze_results, key=lambda x: x.start) - for i, res in enumerate(results): - if i == 0: - tokens.append(text[:res.start]) - - # append entity text 
and entity type - tokens.append((text[res.start: res.end], res.entity_type)) - - # if another entity coming i.e. we're not at the last results element, add text up to next entity - if i != len(results) - 1: - tokens.append(text[res.end:results[i+1].start]) - # if no more entities coming, add all remaining text - else: - tokens.append(text[res.end:]) - return tokens - - -st.set_page_config(page_title="Privy + Presidio demo (English)", layout="wide") - -# Side bar -st.sidebar.markdown( - """ -Detect and anonymize PII in text using an [NLP model](https://huggingface.co/beki/en_spacy_pii_distilbert) trained on protocol traces (JSON, SQL, XML etc.) generated by -[Privy](https://github.com/pixie-io/pixie/tree/main/src/datagen/pii/privy) and rule-based classifiers from [Presidio](https://aka.ms/presidio). -""" -) - -st_entities = st.sidebar.multiselect( - label="Which entities to look for?", - options=get_supported_entities(), - default=list(get_supported_entities()), -) - -st_threshold = st.sidebar.slider( - label="Acceptance threshold", min_value=0.0, max_value=1.0, value=0.35 -) - -st_return_decision_process = st.sidebar.checkbox( - "Add analysis explanations in json") - -st.sidebar.info( - "Privy is an open source framework for synthetic data generation in protocol trace formats (json, sql, html etc). Presidio is an open source framework for PII detection and anonymization. " - "For more info visit [privy](https://github.com/pixie-io/pixie/tree/main/src/datagen/pii/privy) and [aka.ms/presidio](https://aka.ms/presidio)" -) - - -# Main panel -analyzer_load_state = st.info( - "Starting Presidio analyzer and loading Privy-trained PII model...") -engine = analyzer_engine() -analyzer_load_state.empty() - - -st_text = st.text_area( - label="Type in some text", - value="SELECT shipping FROM users WHERE shipping = '201 Thayer St Providence RI 02912'" - "\n\n" - "{user: Willie Porter, ip: 192.168.2.80, email: willie@gmail.com}", - height=200, -) - -button = st.button("Detect PII") - -if 'first_load' not in st.session_state: - st.session_state['first_load'] = True - -# After -st.subheader("Analyzed") -with st.spinner("Analyzing..."): - if button or st.session_state.first_load: - st_analyze_results = analyze( - text=st_text, - entities=st_entities, - language="en", - score_threshold=st_threshold, - return_decision_process=st_return_decision_process, - ) - annotated_tokens = annotate(st_text, st_analyze_results, st_entities) - # annotated_tokens - annotated_text(*annotated_tokens) -# vertical space -st.text("") - -st.subheader("Anonymized") - -with st.spinner("Anonymizing..."): - if button or st.session_state.first_load: - st_anonymize_results = anonymize(st_text, st_analyze_results) - st_anonymize_results - - -# table result -st.subheader("Detailed Findings") -if st_analyze_results: - res_dicts = [r.to_dict() for r in st_analyze_results] - for d in res_dicts: - d['Value'] = st_text[d['start']:d['end']] - df = pd.DataFrame.from_records(res_dicts) - df = df[["entity_type", "Value", "score", "start", "end"]].rename( - { - "entity_type": "Entity type", - "start": "Start", - "end": "End", - "score": "Confidence", - }, - axis=1, - ) - - st.dataframe(df, width=1000) -else: - st.text("No findings") - -st.session_state['first_load'] = True - -# json result - - -class ToDictListEncoder(JSONEncoder): - """Encode dict to json.""" - - def default(self, o): - """Encode to JSON using to_dict.""" - if o: - return o.to_dict() - return [] - - -if st_return_decision_process: - st.json(json.dumps(st_analyze_results, 
cls=ToDictListEncoder)) diff --git a/spaces/bguberfain/Detic/detic/data/custom_dataset_dataloader.py b/spaces/bguberfain/Detic/detic/data/custom_dataset_dataloader.py deleted file mode 100644 index 8f8d6817704026796d2c2f457fe2624800693267..0000000000000000000000000000000000000000 --- a/spaces/bguberfain/Detic/detic/data/custom_dataset_dataloader.py +++ /dev/null @@ -1,331 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Part of the code is from https://github.com/xingyizhou/UniDet/blob/master/projects/UniDet/unidet/data/multi_dataset_dataloader.py (Apache-2.0 License) -import copy -import logging -import numpy as np -import operator -import torch -import torch.utils.data -import json -from detectron2.utils.comm import get_world_size -from detectron2.utils.logger import _log_api_usage, log_first_n - -from detectron2.config import configurable -from detectron2.data import samplers -from torch.utils.data.sampler import BatchSampler, Sampler -from detectron2.data.common import DatasetFromList, MapDataset -from detectron2.data.dataset_mapper import DatasetMapper -from detectron2.data.build import get_detection_dataset_dicts, build_batch_data_loader -from detectron2.data.samplers import TrainingSampler, RepeatFactorTrainingSampler -from detectron2.data.build import worker_init_reset_seed, print_instances_class_histogram -from detectron2.data.build import filter_images_with_only_crowd_annotations -from detectron2.data.build import filter_images_with_few_keypoints -from detectron2.data.build import check_metadata_consistency -from detectron2.data.catalog import MetadataCatalog, DatasetCatalog -from detectron2.utils import comm -import itertools -import math -from collections import defaultdict -from typing import Optional - - -def _custom_train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None): - sampler_name = cfg.DATALOADER.SAMPLER_TRAIN - if 'MultiDataset' in sampler_name: - dataset_dicts = get_detection_dataset_dicts_with_source( - cfg.DATASETS.TRAIN, - filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, - min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE - if cfg.MODEL.KEYPOINT_ON else 0, - proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, - ) - else: - dataset_dicts = get_detection_dataset_dicts( - cfg.DATASETS.TRAIN, - filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, - min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE - if cfg.MODEL.KEYPOINT_ON else 0, - proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, - ) - - if mapper is None: - mapper = DatasetMapper(cfg, True) - - if sampler is not None: - pass - elif sampler_name == "TrainingSampler": - sampler = TrainingSampler(len(dataset)) - elif sampler_name == "MultiDatasetSampler": - sampler = MultiDatasetSampler( - dataset_dicts, - dataset_ratio = cfg.DATALOADER.DATASET_RATIO, - use_rfs = cfg.DATALOADER.USE_RFS, - dataset_ann = cfg.DATALOADER.DATASET_ANN, - repeat_threshold = cfg.DATALOADER.REPEAT_THRESHOLD, - ) - elif sampler_name == "RepeatFactorTrainingSampler": - repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency( - dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD - ) - sampler = RepeatFactorTrainingSampler(repeat_factors) - else: - raise ValueError("Unknown training sampler: {}".format(sampler_name)) - - return { - "dataset": dataset_dicts, - "sampler": sampler, - "mapper": mapper, - "total_batch_size": cfg.SOLVER.IMS_PER_BATCH, - "aspect_ratio_grouping": 
cfg.DATALOADER.ASPECT_RATIO_GROUPING, - "num_workers": cfg.DATALOADER.NUM_WORKERS, - 'multi_dataset_grouping': cfg.DATALOADER.MULTI_DATASET_GROUPING, - 'use_diff_bs_size': cfg.DATALOADER.USE_DIFF_BS_SIZE, - 'dataset_bs': cfg.DATALOADER.DATASET_BS, - 'num_datasets': len(cfg.DATASETS.TRAIN) - } - - -@configurable(from_config=_custom_train_loader_from_config) -def build_custom_train_loader( - dataset, *, mapper, sampler, - total_batch_size=16, - aspect_ratio_grouping=True, - num_workers=0, - num_datasets=1, - multi_dataset_grouping=False, - use_diff_bs_size=False, - dataset_bs=[] - ): - """ - Modified from detectron2.data.build.build_custom_train_loader, but supports - different samplers - """ - if isinstance(dataset, list): - dataset = DatasetFromList(dataset, copy=False) - if mapper is not None: - dataset = MapDataset(dataset, mapper) - if sampler is None: - sampler = TrainingSampler(len(dataset)) - assert isinstance(sampler, torch.utils.data.sampler.Sampler) - if multi_dataset_grouping: - return build_multi_dataset_batch_data_loader( - use_diff_bs_size, - dataset_bs, - dataset, - sampler, - total_batch_size, - num_datasets=num_datasets, - num_workers=num_workers, - ) - else: - return build_batch_data_loader( - dataset, - sampler, - total_batch_size, - aspect_ratio_grouping=aspect_ratio_grouping, - num_workers=num_workers, - ) - - -def build_multi_dataset_batch_data_loader( - use_diff_bs_size, dataset_bs, - dataset, sampler, total_batch_size, num_datasets, num_workers=0 -): - """ - """ - world_size = get_world_size() - assert ( - total_batch_size > 0 and total_batch_size % world_size == 0 - ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format( - total_batch_size, world_size - ) - - batch_size = total_batch_size // world_size - data_loader = torch.utils.data.DataLoader( - dataset, - sampler=sampler, - num_workers=num_workers, - batch_sampler=None, - collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements - worker_init_fn=worker_init_reset_seed, - ) # yield individual mapped dict - if use_diff_bs_size: - return DIFFMDAspectRatioGroupedDataset( - data_loader, dataset_bs, num_datasets) - else: - return MDAspectRatioGroupedDataset( - data_loader, batch_size, num_datasets) - - -def get_detection_dataset_dicts_with_source( - dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None -): - assert len(dataset_names) - dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names] - for dataset_name, dicts in zip(dataset_names, dataset_dicts): - assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) - - for source_id, (dataset_name, dicts) in \ - enumerate(zip(dataset_names, dataset_dicts)): - assert len(dicts), "Dataset '{}' is empty!".format(dataset_name) - for d in dicts: - d['dataset_source'] = source_id - - if "annotations" in dicts[0]: - try: - class_names = MetadataCatalog.get(dataset_name).thing_classes - check_metadata_consistency("thing_classes", dataset_name) - print_instances_class_histogram(dicts, class_names) - except AttributeError: # class names are not available for this dataset - pass - - assert proposal_files is None - - dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts)) - - has_instances = "annotations" in dataset_dicts[0] - if filter_empty and has_instances: - dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts) - if min_keypoints > 0 and has_instances: - dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints) - - return 
dataset_dicts - - -class MultiDatasetSampler(Sampler): - def __init__( - self, - dataset_dicts, - dataset_ratio, - use_rfs, - dataset_ann, - repeat_threshold=0.001, - seed: Optional[int] = None, - ): - """ - """ - sizes = [0 for _ in range(len(dataset_ratio))] - for d in dataset_dicts: - sizes[d['dataset_source']] += 1 - print('dataset sizes', sizes) - self.sizes = sizes - assert len(dataset_ratio) == len(sizes), \ - 'length of dataset ratio {} should be equal to number if dataset {}'.format( - len(dataset_ratio), len(sizes) - ) - if seed is None: - seed = comm.shared_random_seed() - self._seed = int(seed) - self._rank = comm.get_rank() - self._world_size = comm.get_world_size() - - self.dataset_ids = torch.tensor( - [d['dataset_source'] for d in dataset_dicts], dtype=torch.long) - - dataset_weight = [torch.ones(s) * max(sizes) / s * r / sum(dataset_ratio) \ - for i, (r, s) in enumerate(zip(dataset_ratio, sizes))] - dataset_weight = torch.cat(dataset_weight) - - rfs_factors = [] - st = 0 - for i, s in enumerate(sizes): - if use_rfs[i]: - if dataset_ann[i] == 'box': - rfs_func = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency - else: - rfs_func = repeat_factors_from_tag_frequency - rfs_factor = rfs_func( - dataset_dicts[st: st + s], - repeat_thresh=repeat_threshold) - rfs_factor = rfs_factor * (s / rfs_factor.sum()) - else: - rfs_factor = torch.ones(s) - rfs_factors.append(rfs_factor) - st = st + s - rfs_factors = torch.cat(rfs_factors) - - self.weights = dataset_weight * rfs_factors - self.sample_epoch_size = len(self.weights) - - def __iter__(self): - start = self._rank - yield from itertools.islice( - self._infinite_indices(), start, None, self._world_size) - - - def _infinite_indices(self): - g = torch.Generator() - g.manual_seed(self._seed) - while True: - ids = torch.multinomial( - self.weights, self.sample_epoch_size, generator=g, - replacement=True) - nums = [(self.dataset_ids[ids] == i).sum().int().item() \ - for i in range(len(self.sizes))] - yield from ids - - -class MDAspectRatioGroupedDataset(torch.utils.data.IterableDataset): - def __init__(self, dataset, batch_size, num_datasets): - """ - """ - self.dataset = dataset - self.batch_size = batch_size - self._buckets = [[] for _ in range(2 * num_datasets)] - - def __iter__(self): - for d in self.dataset: - w, h = d["width"], d["height"] - aspect_ratio_bucket_id = 0 if w > h else 1 - bucket_id = d['dataset_source'] * 2 + aspect_ratio_bucket_id - bucket = self._buckets[bucket_id] - bucket.append(d) - if len(bucket) == self.batch_size: - yield bucket[:] - del bucket[:] - - -class DIFFMDAspectRatioGroupedDataset(torch.utils.data.IterableDataset): - def __init__(self, dataset, batch_sizes, num_datasets): - """ - """ - self.dataset = dataset - self.batch_sizes = batch_sizes - self._buckets = [[] for _ in range(2 * num_datasets)] - - def __iter__(self): - for d in self.dataset: - w, h = d["width"], d["height"] - aspect_ratio_bucket_id = 0 if w > h else 1 - bucket_id = d['dataset_source'] * 2 + aspect_ratio_bucket_id - bucket = self._buckets[bucket_id] - bucket.append(d) - if len(bucket) == self.batch_sizes[d['dataset_source']]: - yield bucket[:] - del bucket[:] - - -def repeat_factors_from_tag_frequency(dataset_dicts, repeat_thresh): - """ - """ - category_freq = defaultdict(int) - for dataset_dict in dataset_dicts: - cat_ids = dataset_dict['pos_category_ids'] - for cat_id in cat_ids: - category_freq[cat_id] += 1 - num_images = len(dataset_dicts) - for k, v in category_freq.items(): - category_freq[k] = v / num_images 
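- # Per-tag repeat factor below is max(1, sqrt(repeat_thresh / freq)); tags at or above the threshold keep a factor of 1,
- # e.g. with repeat_thresh=0.001 a tag seen in 0.01% of images is repeated ~3.2x (sqrt(10)). Each image then takes the max factor over its tags, as in detectron2's RepeatFactorTrainingSampler.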
- - category_rep = { - cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq)) - for cat_id, cat_freq in category_freq.items() - } - - rep_factors = [] - for dataset_dict in dataset_dicts: - cat_ids = dataset_dict['pos_category_ids'] - rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0) - rep_factors.append(rep_factor) - - return torch.tensor(rep_factors, dtype=torch.float32) \ No newline at end of file diff --git a/spaces/bigjoker/stable-diffusion-webui/modules/deepbooru.py b/spaces/bigjoker/stable-diffusion-webui/modules/deepbooru.py deleted file mode 100644 index 122fce7f569dbd28f9c6d83af874bb3efed34a5e..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/modules/deepbooru.py +++ /dev/null @@ -1,99 +0,0 @@ -import os -import re - -import torch -from PIL import Image -import numpy as np - -from modules import modelloader, paths, deepbooru_model, devices, images, shared - -re_special = re.compile(r'([\\()])') - - -class DeepDanbooru: - def __init__(self): - self.model = None - - def load(self): - if self.model is not None: - return - - files = modelloader.load_models( - model_path=os.path.join(paths.models_path, "torch_deepdanbooru"), - model_url='https://github.com/AUTOMATIC1111/TorchDeepDanbooru/releases/download/v1/model-resnet_custom_v3.pt', - ext_filter=[".pt"], - download_name='model-resnet_custom_v3.pt', - ) - - self.model = deepbooru_model.DeepDanbooruModel() - self.model.load_state_dict(torch.load(files[0], map_location="cpu")) - - self.model.eval() - self.model.to(devices.cpu, devices.dtype) - - def start(self): - self.load() - self.model.to(devices.device) - - def stop(self): - if not shared.opts.interrogate_keep_models_in_memory: - self.model.to(devices.cpu) - devices.torch_gc() - - def tag(self, pil_image): - self.start() - res = self.tag_multi(pil_image) - self.stop() - - return res - - def tag_multi(self, pil_image, force_disable_ranks=False): - threshold = shared.opts.interrogate_deepbooru_score_threshold - use_spaces = shared.opts.deepbooru_use_spaces - use_escape = shared.opts.deepbooru_escape - alpha_sort = shared.opts.deepbooru_sort_alpha - include_ranks = shared.opts.interrogate_return_ranks and not force_disable_ranks - - pic = images.resize_image(2, pil_image.convert("RGB"), 512, 512) - a = np.expand_dims(np.array(pic, dtype=np.float32), 0) / 255 - - with torch.no_grad(), devices.autocast(): - x = torch.from_numpy(a).to(devices.device) - y = self.model(x)[0].detach().cpu().numpy() - - probability_dict = {} - - for tag, probability in zip(self.model.tags, y): - if probability < threshold: - continue - - if tag.startswith("rating:"): - continue - - probability_dict[tag] = probability - - if alpha_sort: - tags = sorted(probability_dict) - else: - tags = [tag for tag, _ in sorted(probability_dict.items(), key=lambda x: -x[1])] - - res = [] - - filtertags = set([x.strip().replace(' ', '_') for x in shared.opts.deepbooru_filter_tags.split(",")]) - - for tag in [x for x in tags if x not in filtertags]: - probability = probability_dict[tag] - tag_outformat = tag - if use_spaces: - tag_outformat = tag_outformat.replace('_', ' ') - if use_escape: - tag_outformat = re.sub(re_special, r'\\\1', tag_outformat) - if include_ranks: - tag_outformat = f"({tag_outformat}:{probability:.3f})" - - res.append(tag_outformat) - - return ", ".join(res) - - -model = DeepDanbooru() diff --git a/spaces/billusanda007/Resume-Ranker/README.md b/spaces/billusanda007/Resume-Ranker/README.md deleted file mode 100644 index 
26904e60e7ff1a27c4dd296d55e868bb06d36d27..0000000000000000000000000000000000000000 --- a/spaces/billusanda007/Resume-Ranker/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Resume Ranker -emoji: 📊 -colorFrom: pink -colorTo: gray -sdk: streamlit -sdk_version: 1.25.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/birsardar/stable-diffusion-mat-outpainting-primer/dataset_tool.py b/spaces/birsardar/stable-diffusion-mat-outpainting-primer/dataset_tool.py deleted file mode 100644 index c59e6292891c3896722965020af7c60056729f2d..0000000000000000000000000000000000000000 --- a/spaces/birsardar/stable-diffusion-mat-outpainting-primer/dataset_tool.py +++ /dev/null @@ -1,444 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import functools -import io -import json -import os -import pickle -import sys -import tarfile -import gzip -import zipfile -from pathlib import Path -from typing import Callable, Optional, Tuple, Union - -import click -import numpy as np -import PIL.Image -from tqdm import tqdm - -#---------------------------------------------------------------------------- - -def error(msg): - print('Error: ' + msg) - sys.exit(1) - -#---------------------------------------------------------------------------- - -def maybe_min(a: int, b: Optional[int]) -> int: - if b is not None: - return min(a, b) - return a - -#---------------------------------------------------------------------------- - -def file_ext(name: Union[str, Path]) -> str: - return str(name).split('.')[-1] - -#---------------------------------------------------------------------------- - -def is_image_ext(fname: Union[str, Path]) -> bool: - ext = file_ext(fname).lower() - return f'.{ext}' in PIL.Image.EXTENSION # type: ignore - -#---------------------------------------------------------------------------- - -def open_image_folder(source_dir, *, max_images: Optional[int]): - input_images = [str(f) for f in sorted(Path(source_dir).rglob('*')) if is_image_ext(f) and os.path.isfile(f)] - - # Load labels. - labels = {} - meta_fname = os.path.join(source_dir, 'dataset.json') - if os.path.isfile(meta_fname): - with open(meta_fname, 'r') as file: - labels = json.load(file)['labels'] - if labels is not None: - labels = { x[0]: x[1] for x in labels } - else: - labels = {} - - max_idx = maybe_min(len(input_images), max_images) - - def iterate_images(): - for idx, fname in enumerate(input_images): - arch_fname = os.path.relpath(fname, source_dir) - arch_fname = arch_fname.replace('\\', '/') - img = np.array(PIL.Image.open(fname)) - yield dict(img=img, label=labels.get(arch_fname)) - if idx >= max_idx-1: - break - return max_idx, iterate_images() - -#---------------------------------------------------------------------------- - -def open_image_zip(source, *, max_images: Optional[int]): - with zipfile.ZipFile(source, mode='r') as z: - input_images = [str(f) for f in sorted(z.namelist()) if is_image_ext(f)] - - # Load labels. 
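- # dataset.json inside the archive (if present) maps each image's archive path to its class label; when it is missing the images are treated as unlabeled.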
- labels = {} - if 'dataset.json' in z.namelist(): - with z.open('dataset.json', 'r') as file: - labels = json.load(file)['labels'] - if labels is not None: - labels = { x[0]: x[1] for x in labels } - else: - labels = {} - - max_idx = maybe_min(len(input_images), max_images) - - def iterate_images(): - with zipfile.ZipFile(source, mode='r') as z: - for idx, fname in enumerate(input_images): - with z.open(fname, 'r') as file: - img = PIL.Image.open(file) # type: ignore - img = np.array(img) - yield dict(img=img, label=labels.get(fname)) - if idx >= max_idx-1: - break - return max_idx, iterate_images() - -#---------------------------------------------------------------------------- - -def open_lmdb(lmdb_dir: str, *, max_images: Optional[int]): - import cv2 # pip install opencv-python - import lmdb # pip install lmdb # pylint: disable=import-error - - with lmdb.open(lmdb_dir, readonly=True, lock=False).begin(write=False) as txn: - max_idx = maybe_min(txn.stat()['entries'], max_images) - - def iterate_images(): - with lmdb.open(lmdb_dir, readonly=True, lock=False).begin(write=False) as txn: - for idx, (_key, value) in enumerate(txn.cursor()): - try: - try: - img = cv2.imdecode(np.frombuffer(value, dtype=np.uint8), 1) - if img is None: - raise IOError('cv2.imdecode failed') - img = img[:, :, ::-1] # BGR => RGB - except IOError: - img = np.array(PIL.Image.open(io.BytesIO(value))) - yield dict(img=img, label=None) - if idx >= max_idx-1: - break - except: - print(sys.exc_info()[1]) - - return max_idx, iterate_images() - -#---------------------------------------------------------------------------- - -def open_cifar10(tarball: str, *, max_images: Optional[int]): - images = [] - labels = [] - - with tarfile.open(tarball, 'r:gz') as tar: - for batch in range(1, 6): - member = tar.getmember(f'cifar-10-batches-py/data_batch_{batch}') - with tar.extractfile(member) as file: - data = pickle.load(file, encoding='latin1') - images.append(data['data'].reshape(-1, 3, 32, 32)) - labels.append(data['labels']) - - images = np.concatenate(images) - labels = np.concatenate(labels) - images = images.transpose([0, 2, 3, 1]) # NCHW -> NHWC - assert images.shape == (50000, 32, 32, 3) and images.dtype == np.uint8 - assert labels.shape == (50000,) and labels.dtype in [np.int32, np.int64] - assert np.min(images) == 0 and np.max(images) == 255 - assert np.min(labels) == 0 and np.max(labels) == 9 - - max_idx = maybe_min(len(images), max_images) - - def iterate_images(): - for idx, img in enumerate(images): - yield dict(img=img, label=int(labels[idx])) - if idx >= max_idx-1: - break - - return max_idx, iterate_images() - -#---------------------------------------------------------------------------- - -def open_mnist(images_gz: str, *, max_images: Optional[int]): - labels_gz = images_gz.replace('-images-idx3-ubyte.gz', '-labels-idx1-ubyte.gz') - assert labels_gz != images_gz - images = [] - labels = [] - - with gzip.open(images_gz, 'rb') as f: - images = np.frombuffer(f.read(), np.uint8, offset=16) - with gzip.open(labels_gz, 'rb') as f: - labels = np.frombuffer(f.read(), np.uint8, offset=8) - - images = images.reshape(-1, 28, 28) - images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0) - assert images.shape == (60000, 32, 32) and images.dtype == np.uint8 - assert labels.shape == (60000,) and labels.dtype == np.uint8 - assert np.min(images) == 0 and np.max(images) == 255 - assert np.min(labels) == 0 and np.max(labels) == 9 - - max_idx = maybe_min(len(images), max_images) - - def iterate_images(): - 
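- # Yield one record per MNIST image (zero-padded to 32x32) together with its digit label, stopping after max_idx images.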
for idx, img in enumerate(images): - yield dict(img=img, label=int(labels[idx])) - if idx >= max_idx-1: - break - - return max_idx, iterate_images() - -#---------------------------------------------------------------------------- - -def make_transform( - transform: Optional[str], - output_width: Optional[int], - output_height: Optional[int], - resize_filter: str -) -> Callable[[np.ndarray], Optional[np.ndarray]]: - resample = { 'box': PIL.Image.BOX, 'lanczos': PIL.Image.LANCZOS }[resize_filter] - def scale(width, height, img): - w = img.shape[1] - h = img.shape[0] - if width == w and height == h: - return img - img = PIL.Image.fromarray(img) - ww = width if width is not None else w - hh = height if height is not None else h - img = img.resize((ww, hh), resample) - return np.array(img) - - def center_crop(width, height, img): - crop = np.min(img.shape[:2]) - img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2] - img = PIL.Image.fromarray(img, 'RGB') - img = img.resize((width, height), resample) - return np.array(img) - - def center_crop_wide(width, height, img): - ch = int(np.round(width * img.shape[0] / img.shape[1])) - if img.shape[1] < width or ch < height: - return None - - img = img[(img.shape[0] - ch) // 2 : (img.shape[0] + ch) // 2] - img = PIL.Image.fromarray(img, 'RGB') - img = img.resize((width, height), resample) - img = np.array(img) - - canvas = np.zeros([width, width, 3], dtype=np.uint8) - canvas[(width - height) // 2 : (width + height) // 2, :] = img - return canvas - - if transform is None: - return functools.partial(scale, output_width, output_height) - if transform == 'center-crop': - if (output_width is None) or (output_height is None): - error ('must specify --width and --height when using ' + transform + 'transform') - return functools.partial(center_crop, output_width, output_height) - if transform == 'center-crop-wide': - if (output_width is None) or (output_height is None): - error ('must specify --width and --height when using ' + transform + ' transform') - return functools.partial(center_crop_wide, output_width, output_height) - assert False, 'unknown transform' - -#---------------------------------------------------------------------------- - -def open_dataset(source, *, max_images: Optional[int]): - if os.path.isdir(source): - if source.rstrip('/').endswith('_lmdb'): - return open_lmdb(source, max_images=max_images) - else: - return open_image_folder(source, max_images=max_images) - elif os.path.isfile(source): - if os.path.basename(source) == 'cifar-10-python.tar.gz': - return open_cifar10(source, max_images=max_images) - elif os.path.basename(source) == 'train-images-idx3-ubyte.gz': - return open_mnist(source, max_images=max_images) - elif file_ext(source) == 'zip': - return open_image_zip(source, max_images=max_images) - else: - assert False, 'unknown archive type' - else: - error(f'Missing input file or directory: {source}') - -#---------------------------------------------------------------------------- - -def open_dest(dest: str) -> Tuple[str, Callable[[str, Union[bytes, str]], None], Callable[[], None]]: - dest_ext = file_ext(dest) - - if dest_ext == 'zip': - if os.path.dirname(dest) != '': - os.makedirs(os.path.dirname(dest), exist_ok=True) - zf = zipfile.ZipFile(file=dest, mode='w', compression=zipfile.ZIP_STORED) - def zip_write_bytes(fname: str, data: Union[bytes, str]): - zf.writestr(fname, data) - return '', zip_write_bytes, zf.close - else: - # If the output folder already 
exists, check that it is - # empty. - # - # Note: creating the output directory is not strictly - # necessary as folder_write_bytes() also mkdirs, but it's better - # to give an error message earlier in case the dest folder - # somehow cannot be created. - if os.path.isdir(dest) and len(os.listdir(dest)) != 0: - error('--dest folder must be empty') - os.makedirs(dest, exist_ok=True) - - def folder_write_bytes(fname: str, data: Union[bytes, str]): - os.makedirs(os.path.dirname(fname), exist_ok=True) - with open(fname, 'wb') as fout: - if isinstance(data, str): - data = data.encode('utf8') - fout.write(data) - return dest, folder_write_bytes, lambda: None - -#---------------------------------------------------------------------------- - -@click.command() -@click.pass_context -@click.option('--source', help='Directory or archive name for input dataset', required=True, metavar='PATH') -@click.option('--dest', help='Output directory or archive name for output dataset', required=True, metavar='PATH') -@click.option('--max-images', help='Output only up to `max-images` images', type=int, default=None) -@click.option('--resize-filter', help='Filter to use when resizing images for output resolution', type=click.Choice(['box', 'lanczos']), default='lanczos', show_default=True) -@click.option('--transform', help='Input crop/resize mode', type=click.Choice(['center-crop', 'center-crop-wide'])) -@click.option('--width', help='Output width', type=int) -@click.option('--height', help='Output height', type=int) -def convert_dataset( - ctx: click.Context, - source: str, - dest: str, - max_images: Optional[int], - transform: Optional[str], - resize_filter: str, - width: Optional[int], - height: Optional[int] -): - """Convert an image dataset into a dataset archive usable with StyleGAN2 ADA PyTorch. - - The input dataset format is guessed from the --source argument: - - \b - --source *_lmdb/ Load LSUN dataset - --source cifar-10-python.tar.gz Load CIFAR-10 dataset - --source train-images-idx3-ubyte.gz Load MNIST dataset - --source path/ Recursively load all images from path/ - --source dataset.zip Recursively load all images from dataset.zip - - Specifying the output format and path: - - \b - --dest /path/to/dir Save output files under /path/to/dir - --dest /path/to/dataset.zip Save output files into /path/to/dataset.zip - - The output dataset format can be either an image folder or an uncompressed zip archive. - Zip archives make it easier to move datasets around file servers and clusters, and may - offer better training performance on network file systems. - - Images within the dataset archive will be stored as uncompressed PNG. - Uncompressed PNGs can be efficiently decoded in the training loop. - - Class labels are stored in a file called 'dataset.json' that is stored at the - dataset root folder. This file has the following structure: - - \b - { - "labels": [ - ["00000/img00000000.png",6], - ["00000/img00000001.png",9], - ... repeated for every image in the dataset - ["00049/img00049999.png",1] - ] - } - - If the 'dataset.json' file cannot be found, the dataset is interpreted as - not containing class labels. - - Image scale/crop and resolution requirements: - - Output images must be square-shaped and they must all have the same power-of-two - dimensions. - - To scale arbitrary input image size to a specific width and height, use the - --width and --height options.
Output resolution will be either the original - input resolution (if --width/--height was not specified) or the one specified with - --width/height. - - Use the --transform=center-crop or --transform=center-crop-wide options to apply a - center crop transform on the input image. These options should be used with the - --width and --height options. For example: - - \b - python dataset_tool.py --source LSUN/raw/cat_lmdb --dest /tmp/lsun_cat \\ - --transform=center-crop-wide --width 512 --height=384 - """ - - PIL.Image.init() # type: ignore - - if dest == '': - ctx.fail('--dest output filename or directory must not be an empty string') - - num_files, input_iter = open_dataset(source, max_images=max_images) - archive_root_dir, save_bytes, close_dest = open_dest(dest) - - transform_image = make_transform(transform, width, height, resize_filter) - - dataset_attrs = None - - labels = [] - for idx, image in tqdm(enumerate(input_iter), total=num_files): - idx_str = f'{idx:08d}' - archive_fname = f'{idx_str[:5]}/img{idx_str}.png' - - # Apply crop and resize. - img = transform_image(image['img']) - - # Transform may drop images. - if img is None: - continue - - # Error check to require uniform image attributes across - # the whole dataset. - channels = img.shape[2] if img.ndim == 3 else 1 - cur_image_attrs = { - 'width': img.shape[1], - 'height': img.shape[0], - 'channels': channels - } - if dataset_attrs is None: - dataset_attrs = cur_image_attrs - width = dataset_attrs['width'] - height = dataset_attrs['height'] - if width != height: - error(f'Image dimensions after scale and crop are required to be square. Got {width}x{height}') - if dataset_attrs['channels'] not in [1, 3]: - error('Input images must be stored as RGB or grayscale') - if width != 2 ** int(np.floor(np.log2(width))): - error('Image width/height after scale and crop are required to be power-of-two') - elif dataset_attrs != cur_image_attrs: - err = [f' dataset {k}/cur image {k}: {dataset_attrs[k]}/{cur_image_attrs[k]}' for k in dataset_attrs.keys()] - error(f'Image {archive_fname} attributes must be equal across all images of the dataset. Got:\n' + '\n'.join(err)) - - # Save the image as an uncompressed PNG. 
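- # compress_level=0 stores the PNG without compression so it decodes quickly in the training loop.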
- img = PIL.Image.fromarray(img, { 1: 'L', 3: 'RGB' }[channels]) - image_bits = io.BytesIO() - img.save(image_bits, format='png', compress_level=0, optimize=False) - save_bytes(os.path.join(archive_root_dir, archive_fname), image_bits.getbuffer()) - labels.append([archive_fname, image['label']] if image['label'] is not None else None) - - metadata = { - 'labels': labels if all(x is not None for x in labels) else None - } - save_bytes(os.path.join(archive_root_dir, 'dataset.json'), json.dumps(metadata)) - close_dest() - -#---------------------------------------------------------------------------- - -if __name__ == "__main__": - convert_dataset() # pylint: disable=no-value-for-parameter diff --git a/spaces/blmdsydm/faster-whisper-webui/src/conversion/hf_converter.py b/spaces/blmdsydm/faster-whisper-webui/src/conversion/hf_converter.py deleted file mode 100644 index 6da4f0fd672d63b099f21d0498ba4001d23356f7..0000000000000000000000000000000000000000 --- a/spaces/blmdsydm/faster-whisper-webui/src/conversion/hf_converter.py +++ /dev/null @@ -1,67 +0,0 @@ -# https://github.com/bayartsogt-ya/whisper-multiple-hf-datasets - -from copy import deepcopy -import torch - -WHISPER_MAPPING = { - "layers": "blocks", - "fc1": "mlp.0", - "fc2": "mlp.2", - "final_layer_norm": "mlp_ln", - "layers": "blocks", - ".self_attn.q_proj": ".attn.query", - ".self_attn.k_proj": ".attn.key", - ".self_attn.v_proj": ".attn.value", - ".self_attn_layer_norm": ".attn_ln", - ".self_attn.out_proj": ".attn.out", - ".encoder_attn.q_proj": ".cross_attn.query", - ".encoder_attn.k_proj": ".cross_attn.key", - ".encoder_attn.v_proj": ".cross_attn.value", - ".encoder_attn_layer_norm": ".cross_attn_ln", - ".encoder_attn.out_proj": ".cross_attn.out", - "decoder.layer_norm.": "decoder.ln.", - "encoder.layer_norm.": "encoder.ln_post.", - "embed_tokens": "token_embedding", - "encoder.embed_positions.weight": "encoder.positional_embedding", - "decoder.embed_positions.weight": "decoder.positional_embedding", - "layer_norm": "ln_post", -} - - -def rename_keys(s_dict): - keys = list(s_dict.keys()) - for key in keys: - new_key = key - for k, v in WHISPER_MAPPING.items(): - if k in key: - new_key = new_key.replace(k, v) - - print(f"{key} -> {new_key}") - - s_dict[new_key] = s_dict.pop(key) - return s_dict - - -def convert_hf_whisper(hf_model_name_or_path: str, whisper_state_path: str): - from transformers import WhisperForConditionalGeneration - transformer_model = WhisperForConditionalGeneration.from_pretrained(hf_model_name_or_path) - config = transformer_model.config - - # first build dims - dims = { - 'n_mels': config.num_mel_bins, - 'n_vocab': config.vocab_size, - 'n_audio_ctx': config.max_source_positions, - 'n_audio_state': config.d_model, - 'n_audio_head': config.encoder_attention_heads, - 'n_audio_layer': config.encoder_layers, - 'n_text_ctx': config.max_target_positions, - 'n_text_state': config.d_model, - 'n_text_head': config.decoder_attention_heads, - 'n_text_layer': config.decoder_layers - } - - state_dict = deepcopy(transformer_model.model.state_dict()) - state_dict = rename_keys(state_dict) - - torch.save({"dims": dims, "model_state_dict": state_dict}, whisper_state_path) \ No newline at end of file diff --git a/spaces/bookbot/Grad-TTS-Weildan-Playground/Grad-TTS/model/utils.py b/spaces/bookbot/Grad-TTS-Weildan-Playground/Grad-TTS/model/utils.py deleted file mode 100644 index 93df40bee97dee744b7585dba1fe9a3f26d3e9b5..0000000000000000000000000000000000000000 --- a/spaces/bookbot/Grad-TTS-Weildan-Playground/Grad-TTS/model/utils.py 
+++ /dev/null @@ -1,44 +0,0 @@ -""" from https://github.com/jaywalnut310/glow-tts """ - -import torch - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(int(max_length), dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def fix_len_compatibility(length, num_downsamplings_in_unet=2): - while True: - if length % (2**num_downsamplings_in_unet) == 0: - return length - length += 1 - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def generate_path(duration, mask): - device = duration.device - - b, t_x, t_y = mask.shape - cum_duration = torch.cumsum(duration, 1) - path = torch.zeros(b, t_x, t_y, dtype=mask.dtype).to(device=device) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - torch.nn.functional.pad(path, convert_pad_shape([[0, 0], - [1, 0], [0, 0]]))[:, :-1] - path = path * mask - return path - - -def duration_loss(logw, logw_, lengths): - loss = torch.sum((logw - logw_)**2) / torch.sum(lengths) - return loss diff --git a/spaces/bradarrML/stablediffusion-infinity/js/w2ui.min.js b/spaces/bradarrML/stablediffusion-infinity/js/w2ui.min.js deleted file mode 100644 index ae849e5012ea6583f8d4f83151d94ad270c6bf4e..0000000000000000000000000000000000000000 --- a/spaces/bradarrML/stablediffusion-infinity/js/w2ui.min.js +++ /dev/null @@ -1,486 +0,0 @@ -/* w2ui 2.0.x (nightly) (10/10/2022, 1:43:34 PM) (c) http://w2ui.com, vitmalina@gmail.com */ -class w2event{constructor(e,t){Object.assign(this,{type:t.type??null,detail:t,owner:e,target:t.target??null,phase:t.phase??"before",object:t.object??null,execute:null,isStopped:!1,isCancelled:!1,onComplete:null,listeners:[]}),delete t.type,delete t.target,delete t.object,this.complete=new Promise((e,t)=>{this._resolve=e,this._reject=t}),this.complete.catch(()=>{})}finish(e){e&&w2utils.extend(this.detail,e),this.phase="after",this.owner.trigger.call(this.owner,this)}done(e){this.listeners.push(e)}preventDefault(){this._reject(),this.isCancelled=!0}stopPropagation(){this.isStopped=!0}}class w2base{constructor(e){if(this.activeEvents=[],this.listeners=[],void 0!==e){if(!w2utils.checkName(e))return;w2ui[e]=this}this.debug=!1}on(e,r){return(e="string"==typeof e?e.split(/[,\s]+/):[e]).forEach(e=>{var t,i,s,l="string"==typeof e?e:e.type+":"+e.execute+"."+e.scope;"string"==typeof e&&([i,t]=e.split("."),[i,s]=i.replace(":complete",":after").replace(":done",":after").split(":"),e={type:i,execute:s??"before",scope:t}),(e=w2utils.extend({type:null,execute:"before",onComplete:null},e)).type?r?(Array.isArray(this.listeners)||(this.listeners=[]),this.listeners.push({name:l,edata:e,handler:r}),this.debug&&console.log("w2base: add event",{name:l,edata:e,handler:r})):console.log("ERROR: You must specify event handler function when calling .on() method of "+this.name):console.log("ERROR: You must specify event type when calling .on() method of "+this.name)}),this}off(e,r){return(e="string"==typeof e?e.split(/[,\s]+/):[e]).forEach(i=>{var e,t,s,l="string"==typeof i?i:i.type+":"+i.execute+"."+i.scope;if("string"==typeof i&&([t,e]=i.split("."),[t,s]=t.replace(":complete",":after").replace(":done",":after").split(":"),i={type:t||"*",execute:s||"",scope:e||""}),(i=w2utils.extend({type:null,execute:null,onComplete:null},i)).type||i.scope){r=r||null;let 
t=0;this.listeners=this.listeners.filter(e=>"*"!==i.type&&i.type!==e.edata.type||""!==i.execute&&i.execute!==e.edata.execute||""!==i.scope&&i.scope!==e.edata.scope||null!=i.handler&&i.handler!==e.edata.handler||(t++,!1)),this.debug&&console.log(`w2base: remove event (${t})`,{name:l,edata:i,handler:r})}else console.log("ERROR: You must specify event type when calling .off() method of "+this.name)}),this}trigger(e,i){if(1==arguments.length?i=e:(i.type=e,i.target=i.target??this),w2utils.isPlainObject(i)&&"after"==i.phase){if(!(i=this.activeEvents.find(e=>e.type==i.type&&e.target==i.target)))return void console.log(`ERROR: Cannot find even handler for "${i.type}" on "${i.target}".`);console.log("NOTICE: This syntax \"edata.trigger({ phase: 'after' })\" is outdated. Use edata.finish() instead.")}else i instanceof w2event||(i=new w2event(this,i),this.activeEvents.push(i));let s,t,l;Array.isArray(this.listeners)||(this.listeners=[]),this.debug&&console.log(`w2base: trigger "${i.type}:${i.phase}"`,i);for(let e=this.listeners.length-1;0<=e;e--){let t=this.listeners[e];if(!(null==t||t.edata.type!==i.type&&"*"!==t.edata.type||t.edata.target!==i.target&&null!=t.edata.target||t.edata.execute!==i.phase&&"*"!==t.edata.execute&&"*"!==t.edata.phase)&&(Object.keys(t.edata).forEach(e=>{null==i[e]&&null!=t.edata[e]&&(i[e]=t.edata[e])}),s=[],l=new RegExp(/\((.*?)\)/).exec(String(t.handler).split("=>")[0]),2===(s=l?l[1].split(/\s*,\s*/):s).length?(t.handler.call(this,i.target,i),this.debug&&console.log(" - call (old)",t.handler)):(t.handler.call(this,i),this.debug&&console.log(" - call",t.handler)),!0===i.isStopped||!0===i.stop))return i}e="on"+i.type.substr(0,1).toUpperCase()+i.type.substr(1);if(!("before"===i.phase&&"function"==typeof this[e]&&(t=this[e],s=[],l=new RegExp(/\((.*?)\)/).exec(String(t).split("=>")[0]),2===(s=l?l[1].split(/\s*,\s*/):s).length?(t.call(this,i.target,i),this.debug&&console.log(" - call: on[Event] (old)",t)):(t.call(this,i),this.debug&&console.log(" - call: on[Event]",t)),!0===i.isStopped||!0===i.stop)||null!=i.object&&"before"===i.phase&&"function"==typeof i.object[e]&&(t=i.object[e],s=[],l=new RegExp(/\((.*?)\)/).exec(String(t).split("=>")[0]),2===(s=l?l[1].split(/\s*,\s*/):s).length?(t.call(this,i.target,i),this.debug&&console.log(" - call: edata.object (old)",t)):(t.call(this,i),this.debug&&console.log(" - call: edata.object",t)),!0===i.isStopped||!0===i.stop)||"after"!==i.phase)){"function"==typeof i.onComplete&&i.onComplete.call(this,i);for(let e=0;e{this[t]=e})}static _fragment(e){let i=document.createElement("template");return i.innerHTML=e,i.content.childNodes.forEach(e=>{var t=Query._scriptConvert(e);t!=e&&i.content.replaceChild(t,e)}),i.content}static _scriptConvert(e){let t=e=>{var t=e.ownerDocument.createElement("script"),i=(t.text=e.text,e.attributes);for(let e=0;e{e.parentNode.replaceChild(t(e),e)}),e}static _fixProp(e){var t={cellpadding:"cellPadding",cellspacing:"cellSpacing",class:"className",colspan:"colSpan",contenteditable:"contentEditable",for:"htmlFor",frameborder:"frameBorder",maxlength:"maxLength",readonly:"readOnly",rowspan:"rowSpan",tabindex:"tabIndex",usemap:"useMap"};return t[e]||e}_insert(l,i){let r=[],n=this.length;if(!(n<1)){let e=this;if("string"==typeof i)this.each(e=>{var t=Query._fragment(i);r.push(...t.childNodes),e[l](t)});else if(i instanceof Query){let s=1==n;i.each(i=>{this.each(e=>{var t=s?i:i.cloneNode(!0);r.push(t),e[l](t),Query._scriptConvert(t)})}),s||i.remove()}else{if(!(i instanceof Node))throw new Error(`Incorrect argument for 
"${l}(html)". It expects one string argument.`);this.each(e=>{var t=1===n?i:Query._fragment(i.outerHTML);r.push(...1===n?[i]:t.childNodes),e[l](t)}),1{e=Array.from(e.querySelectorAll(t));0{(e===t||"string"==typeof t&&e.matches&&e.matches(t)||"function"==typeof t&&t(e))&&i.push(e)}),new Query(i,this.context,this)}next(){let t=[];return this.each(e=>{e=e.nextElementSibling;e&&t.push(e)}),new Query(t,this.context,this)}prev(){let t=[];return this.each(e=>{e=e.previousElementSibling;e&&t.push(e)}),new Query(t,this.context,this)}shadow(e){let t=[];this.each(e=>{e.shadowRoot&&t.push(e.shadowRoot)});var i=new Query(t,this.context,this);return e?i.find(e):i}closest(t){let i=[];return this.each(e=>{e=e.closest(t);e&&i.push(e)}),new Query(i,this.context,this)}host(t){let i=[],s=e=>e.parentNode?s(e.parentNode):e,l=e=>{e=s(e);i.push(e.host||e),e.host&&t&&l(e.host)};return this.each(e=>{l(e)}),new Query(i,this.context,this)}parent(e){return this.parents(e,!0)}parents(e,t){let i=[],s=e=>{if(-1==i.indexOf(e)&&i.push(e),!t&&e.parentNode)return s(e.parentNode)};this.each(e=>{e.parentNode&&s(e.parentNode)});var l=new Query(i,this.context,this);return e?l.filter(e):l}add(e){e=e instanceof Query?e.nodes:Array.isArray(e)?e:[e];return new Query(this.nodes.concat(e),this.context,this)}each(i){return this.nodes.forEach((e,t)=>{i(e,t,this)}),this}append(e){return this._insert("append",e)}prepend(e){return this._insert("prepend",e)}after(e){return this._insert("after",e)}before(e){return this._insert("before",e)}replace(e){return this._insert("replaceWith",e)}remove(){return this.each(e=>{e.remove()}),this}css(e,t){let s=e;var i,l=arguments.length;return 0===l||1===l&&"string"==typeof e?this[0]?(l=this[0].style,"string"==typeof e?(i=l.getPropertyPriority(e),l.getPropertyValue(e)+(i?"!"+i:"")):Object.fromEntries(this[0].style.cssText.split(";").filter(e=>!!e).map(e=>e.split(":").map(e=>e.trim())))):void 0:("object"!=typeof e&&((s={})[e]=t),this.each((i,e)=>{Object.keys(s).forEach(e=>{var t=String(s[e]).toLowerCase().includes("!important")?"important":"";i.style.setProperty(e,String(s[e]).replace(/\!important/i,""),t)})}),this)}addClass(e){return this.toggleClass(e,!0),this}removeClass(e){return this.toggleClass(e,!1),this}toggleClass(t,s){return"string"==typeof t&&(t=t.split(/[,\s]+/)),this.each(i=>{let e=t;(e=null==e&&!1===s?Array.from(i.classList):e).forEach(t=>{if(""!==t){let e=null!=s?s?"add":"remove":"toggle";i.classList[e](t)}})}),this}hasClass(e){if(null==(e="string"==typeof e?e.split(/[,\s]+/):e)&&0{i=i||e.every(e=>Array.from(t.classList??[]).includes(e))}),i}on(e,s,l){"function"==typeof s&&(l=s,s=void 0);let r;return s?.delegate&&(r=s.delegate,delete s.delegate),(e=e.split(/[,\s]+/)).forEach(e=>{let[t,i]=String(e).toLowerCase().split(".");if(r){let i=l;l=e=>{var t=query(e.target).parents(r);0{this._save(e,"events",[{event:t,scope:i,callback:l,options:s}]),e.addEventListener(t,l,s)})}),this}off(e,t,r){return"function"==typeof t&&(r=t,t=void 0),(e=(e??"").split(/[,\s]+/)).forEach(e=>{let[s,l]=String(e).toLowerCase().split(".");this.each(t=>{if(Array.isArray(t._mQuery?.events))for(let e=t._mQuery.events.length-1;0<=e;e--){var i=t._mQuery.events[e];null==l||""===l?i.event!=s&&""!==s||i.callback!=r&&null!=r||(t.removeEventListener(i.event,i.callback,i.options),t._mQuery.events.splice(e,1)):i.event!=s&&""!==s||i.scope!=l||(t.removeEventListener(i.event,i.callback,i.options),t._mQuery.events.splice(e,1))}})}),this}trigger(e,t){let i;return i=e instanceof Event||e instanceof 
CustomEvent?e:new(["click","dblclick","mousedown","mouseup","mousemove"].includes(e)?MouseEvent:["keydown","keyup","keypress"].includes(e)?KeyboardEvent:Event)(e,t),this.each(e=>{e.dispatchEvent(i)}),this}attr(t,i){if(void 0===i&&"string"==typeof t)return this[0]?this[0].getAttribute(t):void 0;{let e={};return"object"==typeof t?e=t:e[t]=i,this.each(i=>{Object.entries(e).forEach(([e,t])=>{i.setAttribute(e,t)})}),this}}removeAttr(){return this.each(t=>{Array.from(arguments).forEach(e=>{t.removeAttribute(e)})}),this}prop(t,i){if(void 0===i&&"string"==typeof t)return this[0]?this[0][t]:void 0;{let e={};return"object"==typeof t?e=t:e[t]=i,this.each(i=>{Object.entries(e).forEach(([e,t])=>{e=Query._fixProp(e);i[e]=t,"innerHTML"==e&&Query._scriptConvert(i)})}),this}}removeProp(){return this.each(t=>{Array.from(arguments).forEach(e=>{delete t[Query._fixProp(e)]})}),this}data(i,t){if(i instanceof Object)Object.entries(i).forEach(e=>{this.data(e[0],e[1])});else{if(i&&-1!=i.indexOf("-")&&console.error(`Key "${i}" contains "-" (dash). Dashes are not allowed in property names. Use camelCase instead.`),!(arguments.length<2))return this.each(e=>{null!=t?e.dataset[i]=t instanceof Object?JSON.stringify(t):t:delete e.dataset[i]}),this;if(this[0]){let t=Object.assign({},this[0].dataset);return Object.keys(t).forEach(e=>{if(t[e].startsWith("[")||t[e].startsWith("{"))try{t[e]=JSON.parse(t[e])}catch(e){}}),i?t[i]:t}}}removeData(e){return"string"==typeof e&&(e=e.split(/[,\s]+/)),this.each(t=>{e.forEach(e=>{delete t.dataset[e]})}),this}show(){return this.toggle(!0)}hide(){return this.toggle(!1)}toggle(l){return this.each(e=>{var t=e.style.display,i=getComputedStyle(e).display,s="none"==t||"none"==i;!s||null!=l&&!0!==l||(e.style.display=e._mQuery?.prevDisplay??(t==i&&"none"!=i?"":"block"),this._save(e,"prevDisplay",null)),s||null!=l&&!1!==l||("none"!=i&&this._save(e,"prevDisplay",i),e.style.setProperty("display","none"))})}empty(){return this.html("")}html(e){return this.prop("innerHTML",e)}text(e){return this.prop("textContent",e)}val(e){return this.prop("value",e)}change(){return this.trigger("change")}click(){return this.trigger("click")}}let query=function(e,t){if("function"!=typeof e)return new Query(e,t);"complete"==document.readyState?e():window.addEventListener("load",e)},w2ui=(query.html=e=>{e=Query._fragment(e);return query(e.children,e)},query.version=Query.version,{});class Utils{constructor(){this.version="2.0.x",this.tmp={},this.settings=this.extend({},{dataType:"HTTPJSON",dateStartYear:1950,dateEndYear:2030,macButtonOrder:!1,warnNoPhrase:!1},w2locale,{phrases:null}),this.i18nCompare=Intl.Collator().compare,this.hasLocalStorage=function(){var e="w2ui_test";try{return localStorage.setItem(e,e),localStorage.removeItem(e),!0}catch(e){return!1}}(),this.isMac=/Mac/i.test(navigator.platform),this.isMobile=/(iphone|ipod|ipad|mobile|android)/i.test(navigator.userAgent),this.isIOS=/(iphone|ipod|ipad)/i.test(navigator.platform),this.isAndroid=/(android)/i.test(navigator.userAgent),this.isSafari=/^((?!chrome|android).)*safari/i.test(navigator.userAgent),this.formatters={number(e,t){return 20'+w2utils.formatDate(i,t)+""},datetime(e,t){if(""===t&&(t=w2utils.settings.datetimeFormat),null==e||0===e||""===e)return"";let i=w2utils.isDateTime(e,t,!0);return''+w2utils.formatDateTime(i,t)+""},time(e,t){if(""===t&&(t=w2utils.settings.timeFormat),null==e||0===e||""===e)return"";let i=w2utils.isDateTime(e,t="h24"===(t="h12"===t?"hh:mi 
pm":t)?"h24:mi":t,!0);return''+w2utils.formatTime(e,t)+""},timestamp(e,t){if(""===t&&(t=w2utils.settings.datetimeFormat),null==e||0===e||""===e)return"";let i=w2utils.isDateTime(e,t,!0);return(i=!1===i?w2utils.isDate(e,t,!0):i).toString?i.toString():""},gmt(e,t){if(""===t&&(t=w2utils.settings.datetimeFormat),null==e||0===e||""===e)return"";let i=w2utils.isDateTime(e,t,!0);return(i=!1===i?w2utils.isDate(e,t,!0):i).toUTCString?i.toUTCString():""},age(e,t){if(null==e||0===e||""===e)return"";let i=w2utils.isDateTime(e,null,!0);return''+w2utils.age(e)+(t?" "+t:"")+""},interval(e,t){return null==e||0===e||""===e?"":w2utils.interval(e)+(t?" "+t:"")},toggle(e,t){return e?"Yes":""},password(t,e){let i="";for(let e=0;ei||!this.isInt(e[0])||2'+(r=l==e?this.lang("Yesterday"):r)+""}formatSize(e){var t;return this.isFloat(e)&&""!==e?0===(e=parseFloat(e))?0:(t=parseInt(Math.floor(Math.log(e)/Math.log(1024))),(Math.floor(e/Math.pow(1024,t)*10)/10).toFixed(0===t?0:1)+" "+(["Bt","KB","MB","GB","TB","PB","EB","ZB"][t]||"??")):""}formatNumber(e,t,i){return null==e||""===e||"object"==typeof e?"":(i={minimumFractionDigits:t,maximumFractionDigits:t,useGrouping:i},(null==t||t<0)&&(i.minimumFractionDigits=0,i.maximumFractionDigits=20),parseFloat(e).toLocaleString(this.settings.locale,i))}formatDate(e,t){if(t=t||this.settings.dateFormat,""===e||null==e||"object"==typeof e&&!e.getMonth)return"";let i=new Date(e);var s,l;return this.isInt(e)&&(i=new Date(Number(e))),"Invalid Date"===String(i)?"":(e=i.getFullYear(),s=i.getMonth(),l=i.getDate(),t.toLowerCase().replace("month",this.settings.fullmonths[s]).replace("mon",this.settings.shortmonths[s]).replace(/yyyy/g,("000"+e).slice(-4)).replace(/yyy/g,("000"+e).slice(-4)).replace(/yy/g,("0"+e).slice(-2)).replace(/(^|[^a-z$])y/g,"$1"+e).replace(/mm/g,("0"+(s+1)).slice(-2)).replace(/dd/g,("0"+l).slice(-2)).replace(/th/g,1==l?"st":"th").replace(/th/g,2==l?"nd":"th").replace(/th/g,3==l?"rd":"th").replace(/(^|[^a-z$])m/g,"$1"+(s+1)).replace(/(^|[^a-z$])d/g,"$1"+l))}formatTime(e,t){if(t=t||this.settings.timeFormat,""===e||null==e||"object"==typeof e&&!e.getMonth)return"";let i=new Date(e);if(this.isInt(e)&&(i=new Date(Number(e))),this.isTime(e)&&(e=this.isTime(e,!0),(i=new Date).setHours(e.hours),i.setMinutes(e.minutes)),"Invalid Date"===String(i))return"";let s="am",l=i.getHours();e=i.getHours();let r=i.getMinutes(),n=i.getSeconds();return r<10&&(r="0"+r),n<10&&(n="0"+n),-1===t.indexOf("am")&&-1===t.indexOf("pm")||(12<=l&&(s="pm"),12{i[t]=this.stripSpaces(e)}):(i=this.extend({},i),Object.keys(i).forEach(e=>{i[e]=this.stripSpaces(i[e])}))}return i}stripTags(i){if(null!=i)switch(typeof i){case"number":break;case"string":i=String(i).replace(/<(?:[^>=]|='[^']*'|="[^"]*"|=[^'"][^\s>]*)*>/gi,"");break;case"object":Array.isArray(i)?(i=this.extend([],i)).forEach((e,t)=>{i[t]=this.stripTags(e)}):(i=this.extend({},i),Object.keys(i).forEach(e=>{i[e]=this.stripTags(i[e])}))}return i}encodeTags(i){if(null!=i)switch(typeof i){case"number":break;case"string":i=String(i).replace(/&/g,"&").replace(/>/g,">").replace(/{i[t]=this.encodeTags(e)}):(i=this.extend({},i),Object.keys(i).forEach(e=>{i[e]=this.encodeTags(i[e])}))}return i}decodeTags(i){if(null!=i)switch(typeof i){case"number":break;case"string":i=String(i).replace(/>/g,">").replace(/</g,"<").replace(/"/g,'"').replace(/&/g,"&");break;case"object":Array.isArray(i)?(i=this.extend([],i)).forEach((e,t)=>{i[t]=this.decodeTags(e)}):(i=this.extend({},i),Object.keys(i).forEach(e=>{i[e]=this.decodeTags(i[e])}))}return 
i}escapeId(e){return""===e||null==e?"":(e+"").replace(/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,(e,t)=>t?"\0"===e?"�":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e)}unescapeId(e){return""===e||null==e?"":e.replace(/\\[\da-fA-F]{1,6}[\x20\t\r\n\f]?|\\([^\r\n\f])/g,(e,t)=>{e="0x"+e.slice(1)-65536;return t||(e<0?String.fromCharCode(65536+e):String.fromCharCode(e>>10|55296,1023&e|56320))})}base64encode(e){let t="",i,s,l,r,n,a,o,h=0;var d="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";for(e=function(t){t=String(t).replace(/\r\n/g,"\n");let i="";for(let e=0;e>6|192))+String.fromCharCode(63&s|128):(i=(i+=String.fromCharCode(s>>12|224))+String.fromCharCode(s>>6&63|128))+String.fromCharCode(63&s|128)}return i}(e);h>2,n=(3&i)<<4|s>>4,a=(15&s)<<2|l>>6,o=63&l,isNaN(s)?a=o=64:isNaN(l)&&(o=64),t=t+d.charAt(r)+d.charAt(n)+d.charAt(a)+d.charAt(o);return t}base64decode(e){let t="";var i,s,l,r,n,a;let o=0;var h="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";for(e=e.replace(/[^A-Za-z0-9\+\/\=]/g,"");o>2,s=(3&n)<<6|(a=h.indexOf(e.charAt(o++))),t+=String.fromCharCode(l<<2|r>>4),64!==n&&(t+=String.fromCharCode(i)),64!==a&&(t+=String.fromCharCode(s));return t=function(e){let t="",i=0,s=0,l,r;for(;i{return Array.from(new Uint8Array(e)).map(e=>e.toString(16).padStart(2,"0")).join("")})}transition(r,n,a,o){return new Promise((e,t)=>{var i=getComputedStyle(r);let s=parseInt(i.width),l=parseInt(i.height);if(r&&n){switch(r.parentNode.style.cssText+="perspective: 900px; overflow: hidden;",r.style.cssText+="; position: absolute; z-index: 1019; backface-visibility: hidden",n.style.cssText+="; position: absolute; z-index: 1020; backface-visibility: hidden",a){case"slide-left":r.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0)",n.style.cssText+="overflow: hidden; transform: translate3d("+s+"px, 0, 0)",query(n).show(),setTimeout(()=>{n.style.cssText+="transition: 0.5s; transform: translate3d(0, 0, 0)",r.style.cssText+="transition: 0.5s; transform: translate3d(-"+s+"px, 0, 0)"},1);break;case"slide-right":r.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0)",n.style.cssText+="overflow: hidden; transform: translate3d(-"+s+"px, 0, 0)",query(n).show(),setTimeout(()=>{n.style.cssText+="transition: 0.5s; transform: translate3d(0px, 0, 0)",r.style.cssText+="transition: 0.5s; transform: translate3d("+s+"px, 0, 0)"},1);break;case"slide-down":r.style.cssText+="overflow: hidden; z-index: 1; transform: translate3d(0, 0, 0)",n.style.cssText+="overflow: hidden; z-index: 0; transform: translate3d(0, 0, 0)",query(n).show(),setTimeout(()=>{n.style.cssText+="transition: 0.5s; transform: translate3d(0, 0, 0)",r.style.cssText+="transition: 0.5s; transform: translate3d(0, "+l+"px, 0)"},1);break;case"slide-up":r.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0)",n.style.cssText+="overflow: hidden; transform: translate3d(0, "+l+"px, 0)",query(n).show(),setTimeout(()=>{n.style.cssText+="transition: 0.5s; transform: translate3d(0, 0, 0)",r.style.cssText+="transition: 0.5s; transform: translate3d(0, 0, 0)"},1);break;case"flip-left":r.style.cssText+="overflow: hidden; transform: rotateY(0deg)",n.style.cssText+="overflow: hidden; transform: rotateY(-180deg)",query(n).show(),setTimeout(()=>{n.style.cssText+="transition: 0.5s; transform: rotateY(0deg)",r.style.cssText+="transition: 0.5s; transform: rotateY(180deg)"},1);break;case"flip-right":r.style.cssText+="overflow: hidden; transform: rotateY(0deg)",n.style.cssText+="overflow: 
hidden; transform: rotateY(180deg)",query(n).show(),setTimeout(()=>{n.style.cssText+="transition: 0.5s; transform: rotateY(0deg)",r.style.cssText+="transition: 0.5s; transform: rotateY(-180deg)"},1);break;case"flip-down":r.style.cssText+="overflow: hidden; transform: rotateX(0deg)",n.style.cssText+="overflow: hidden; transform: rotateX(180deg)",query(n).show(),setTimeout(()=>{n.style.cssText+="transition: 0.5s; transform: rotateX(0deg)",r.style.cssText+="transition: 0.5s; transform: rotateX(-180deg)"},1);break;case"flip-up":r.style.cssText+="overflow: hidden; transform: rotateX(0deg)",n.style.cssText+="overflow: hidden; transform: rotateX(-180deg)",query(n).show(),setTimeout(()=>{n.style.cssText+="transition: 0.5s; transform: rotateX(0deg)",r.style.cssText+="transition: 0.5s; transform: rotateX(180deg)"},1);break;case"pop-in":r.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0)",n.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0); transform: scale(.8); opacity: 0;",query(n).show(),setTimeout(()=>{n.style.cssText+="transition: 0.5s; transform: scale(1); opacity: 1;",r.style.cssText+="transition: 0.5s;"},1);break;case"pop-out":r.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0); transform: scale(1); opacity: 1;",n.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0); opacity: 0;",query(n).show(),setTimeout(()=>{n.style.cssText+="transition: 0.5s; opacity: 1;",r.style.cssText+="transition: 0.5s; transform: scale(1.7); opacity: 0;"},1);break;default:r.style.cssText+="overflow: hidden; transform: translate3d(0, 0, 0)",n.style.cssText+="overflow: hidden; translate3d(0, 0, 0); opacity: 0;",query(n).show(),setTimeout(()=>{n.style.cssText+="transition: 0.5s; opacity: 1;",r.style.cssText+="transition: 0.5s"},1)}setTimeout(()=>{"slide-down"===a&&(query(r).css("z-index","1019"),query(n).css("z-index","1020")),n&&query(n).css({opacity:"1"}).css({transition:"",transform:""}),r&&query(r).css({opacity:"1"}).css({transition:"",transform:""}),"function"==typeof o&&o(),e()},500)}else console.log("ERROR: Cannot do transition when one of the divs is null")})}lock(i,s={}){if(null!=i){"string"==typeof s&&(s={msg:s}),arguments[2]&&(s.spinner=arguments[2]),s=this.extend({spinner:!1},s),i?.[0]instanceof Node&&(i=Array.isArray(i)?i:i.get()),s.msg||0===s.msg||(s.msg=""),this.unlock(i),query(i).prepend('
      ');let e=query(i).find(".w2ui-lock");i=query(i).find(".w2ui-lock-msg"),i=(s.msg||i.css({"background-color":"transparent","background-image":"none",border:"0px","box-shadow":"none"}),!0===s.spinner&&(s.msg=`
      `+s.msg),s.msg?i.html(s.msg).css("display","block"):i.remove(),null!=s.opacity&&e.css("opacity",s.opacity),e.css({display:"block"}),s.bgColor&&e.css({"background-color":s.bgColor}),getComputedStyle(e.get(0)));let t=i.opacity??.15;e.on("mousedown",function(){"function"==typeof s.onClick?s.onClick():e.css({transition:".2s",opacity:1.5*t})}).on("mouseup",function(){"function"!=typeof s.onClick&&e.css({transition:".2s",opacity:t})}).on("mousewheel",function(e){e&&(e.stopPropagation(),e.preventDefault())})}}unlock(e,t){null!=e&&(clearTimeout(e._prevUnlock),e?.[0]instanceof Node&&(e=Array.isArray(e)?e:e.get()),this.isInt(t)&&0{query(e).find(".w2ui-lock").remove()},t)):(query(e).find(".w2ui-lock").remove(),query(e).find(".w2ui-lock-msg").remove()))}message(r,s){let e,t,l;var i=()=>{var e=query(r?.box).find(".w2ui-message");0!=e.length&&"function"==typeof(s=e.get(0)._msg_options||{})?.close&&s.close()};let n=e=>{var t,i=e.box._msg_prevFocus;query(r.box).find(".w2ui-message").length<=1?r.owner?r.owner.unlock(r.param,150):this.unlock(r.box,150):query(r.box).find(`#w2ui-message-${r.owner?.name}-`+(e.msgIndex-1)).css("z-index",1500),i?0<(t=query(i).closest(".w2ui-message")).length?t.get(0)._msg_options.setFocus(i):i.focus():"function"==typeof r.owner?.focus&&r.owner.focus(),query(e.box).remove(),0===e.msgIndex&&(c.css("z-index",e.tmp.zIndex),query(r.box).css("overflow",e.tmp.overflow)),e.trigger&&l.finish()};if("object"!=typeof(s="string"!=typeof s&&"number"!=typeof s?s:{width:String(s).length<300?350:550,height:String(s).length<300?170:250,text:String(s)}))return void i();null!=s.text&&(s.body=`
      ${s.text}
      `),null==s.width&&(s.width=350),null==s.height&&(s.height=170),null==s.hideOn&&(s.hideOn=["esc"]),null==s.on&&(h=s,s=new w2base,w2utils.extend(s,h)),s.on("open",e=>{w2utils.bindEvents(query(s.box).find(".w2ui-eaction"),s),query(e.detail.box).find("button, input, textarea, [name=hidden-first]").off(".message").on("keydown.message",function(e){27==e.keyCode&&s.hideOn.includes("esc")&&(s.cancelAction?s.action(s.cancelAction):s.close())}),s.setFocus(s.focus)}),s.off(".prom");let a={self:s,action(e){return s.on("action.prom",e),a},close(e){return s.on("close.prom",e),a},open(e){return s.on("open.prom",e),a},then(e){return s.on("open:after.prom",e),a}},o=(null==s.actions&&null==s.buttons&&null==s.html&&(s.actions={Ok(e){e.detail.self.close()}}),s.off(".buttons"),null!=s.actions&&(s.buttons="",Object.keys(s.actions).forEach(e=>{var t=s.actions[e];let i=e;"function"==typeof t&&(s.buttons+=``),"object"==typeof t&&(s.buttons+=``,i=Array.isArray(s.actions)?t.text:e),"string"==typeof t&&(s.buttons+=``,i=t),"string"==typeof i&&(i=i[0].toLowerCase()+i.substr(1).replace(/\s+/g,"")),a[i]=function(t){return s.on("action.buttons",e=>{e.detail.action[0].toLowerCase()+e.detail.action.substr(1).replace(/\s+/g,"")==i&&t(e)}),a}})),Array("html","body","buttons").forEach(e=>{s[e]=String(s[e]??"").trim()}),""===s.body&&""===s.buttons||(s.html=` -
      ${s.body||""}
      -
      ${s.buttons||""}
      - `),getComputedStyle(query(r.box).get(0)));var h=parseFloat(o.width),d=parseFloat(o.height);let u=0,c=(0h&&(s.width=h-10),s.height>d-u&&(s.height=d-10-u),s.originalWidth=s.width,s.originalHeight=s.height,parseInt(s.width)<0&&(s.width=h+s.width),parseInt(s.width)<10&&(s.width=10),parseInt(s.height)<0&&(s.height=d+s.height-u),parseInt(s.height)<10&&(s.height=10),s.originalHeight<0&&(s.height=d+s.originalHeight-u),s.originalWidth<0&&(s.width=h+2*s.originalWidth),query(r.box).find(r.after));return s.tmp||(s.tmp={zIndex:c.css("z-index"),overflow:o.overflow}),""===s.html&&""===s.body&&""===s.buttons?i():(s.msgIndex=query(r.box).find(".w2ui-message").length,0===s.msgIndex&&"function"==typeof this.lock&&(query(r.box).css("overflow","hidden"),r.owner?r.owner.lock(r.param):this.lock(r.box)),query(r.box).find(".w2ui-message").css("z-index",1390),c.css("z-index",1501),d=` -
      - - ${s.html} - -
      `,0{!0===(l=s.trigger("open",{target:this.name,box:s.box,self:s})).isCancelled?(query(r.box).find(`#w2ui-message-${r.owner?.name}-`+s.msgIndex).remove(),0===s.msgIndex&&(c.css("z-index",s.tmp.zIndex),query(r.box).css("overflow",s.tmp.overflow))):query(s.box).css({transition:"0.3s",transform:"translateY(0px)"})},0),t=setTimeout(()=>{query(r.box).find(`#w2ui-message-${r.owner?.name}-`+s.msgIndex).removeClass("animating").css({transition:"0s"}),l.finish()},300)),s.action=(e,t)=>{let i=s.actions[e];i instanceof Object&&i.onClick&&(i=i.onClick);e=s.trigger("action",{target:this.name,action:e,self:s,originalEvent:t,value:s.input?s.input.value:null});!0!==e.isCancelled&&("function"==typeof i&&i(e),e.finish())},s.close=()=>{!0!==(l=s.trigger("close",{target:"self",box:s.box,self:s})).isCancelled&&(clearTimeout(t),query(s.box).hasClass("animating")?(clearTimeout(e),n(s)):(query(s.box).addClass("w2ui-closing animating").css({transition:"0.15s",transform:"translateY(-"+s.height+"px)"}),0!==s.msgIndex&&query(r.box).find(`#w2ui-message-${r.owner?.name}-`+(s.msgIndex-1)).css("z-index",1499),e=setTimeout(()=>{n(s)},150)))},s.setFocus=e=>{var t=query(r.box).find(".w2ui-message").length-1;let s=query(r.box).find(`#w2ui-message-${r.owner?.name}-`+t),l="input, button, select, textarea, [contentEditable], .w2ui-input";(null!=e?isNaN(e)?s.find(l).filter(e).get(0):s.find(l).get(e):s.find("[name=hidden-first]").get(0))?.focus(),query(r.box).find(".w2ui-message").find(l+",[name=hidden-first],[name=hidden-last]").off(".keep-focus"),query(s).find(l+",[name=hidden-first],[name=hidden-last]").on("blur.keep-focus",function(e){setTimeout(()=>{var e=document.activeElement,t=0{if("object"==typeof i&&(i=(s=i).text),(s=s||{}).where=s.where??document.body,s.timeout=s.timeout??15e3,"function"==typeof this.tmp.notify_resolve&&(this.tmp.notify_resolve(),query(this.tmp.notify_where).find("#w2ui-notify").remove()),this.tmp.notify_resolve=t,this.tmp.notify_where=s.where,clearTimeout(this.tmp.notify_timer),i){if("object"==typeof s.actions){let t={};Object.keys(s.actions).forEach(e=>{t[e]=`${e}`}),i=this.execTemplate(i,t)}var e=` -
      -
      - ${i} - -
      -
      `;query(s.where).append(e),query(s.where).find("#w2ui-notify").find(".w2ui-notify-close").on("click",e=>{query(s.where).find("#w2ui-notify").remove(),t()}),s.actions&&query(s.where).find("#w2ui-notify .w2ui-notify-link").on("click",e=>{e=query(e.target).attr("value");s.actions[e](),query(s.where).find("#w2ui-notify").remove(),t()}),0{query(s.where).find("#w2ui-notify").remove(),t()},s.timeout))}})}confirm(e,t){w2utils.normButtons(t="string"==typeof t?{text:t}:t,{yes:"Yes",no:"No"});e=w2utils.message(e,t);return e&&e.action(e=>{e.detail.self.close()}),e}normButtons(i,s){i.actions=i.actions??{};var e=Object.keys(s);return e.forEach(t=>{var e=i["btn_"+t];e&&(s[t]={text:w2utils.lang(e.text??""),class:e.class??"",style:e.style??"",attrs:e.attrs??""},delete i["btn_"+t]),Array("text","class","style","attrs").forEach(e=>{i[t+"_"+e]&&("string"==typeof s[t]&&(s[t]={text:s[t]}),s[t][e]=i[t+"_"+e],delete i[t+"_"+e])})}),e.includes("yes")&&e.includes("no")&&(w2utils.settings.macButtonOrder?w2utils.extend(i.actions,{no:s.no,yes:s.yes}):w2utils.extend(i.actions,{yes:s.yes,no:s.no})),e.includes("ok")&&e.includes("cancel")&&(w2utils.settings.macButtonOrder?w2utils.extend(i.actions,{cancel:s.cancel,ok:s.ok}):w2utils.extend(i.actions,{ok:s.ok,cancel:s.cancel})),i}getSize(e,t){let i=0;if(0<(e=query(e)).length){e=e[0];var s=getComputedStyle(e);switch(t){case"width":i=parseFloat(s.width),"auto"===s.width&&(i=0);break;case"height":i=parseFloat(s.height),"auto"===s.height&&(i=0)}}return i}getStrWidth(e,t){query("body").append(` -
      - ${this.encodeTags(e)} -
      `);t=query("#_tmp_width")[0].clientWidth;return query("#_tmp_width").remove(),t}execTemplate(e,i){return"string"==typeof e&&i&&"object"==typeof i?e.replace(/\${([^}]+)?}/g,function(e,t){return i[t]||t}):e}marker(e,s,l={onlyFirst:!1,wholeWord:!1}){Array.isArray(s)||(s=null!=s&&""!==s?[s]:[]);let r=l.wholeWord;query(e).each(t=>{for(var e=t,i=/\((.|\n|\r)*)\<\/span\>/gi;-1!==e.innerHTML.indexOf('{e=(e="string"!=typeof e?String(e):e).replace(/[-[\]{}()*+?.,\\^$|#\s]/g,"\\$&").replace(/&/g,"&").replace(//g,"<");e=new RegExp((r?"\\b":"")+e+(r?"\\b":"")+"(?!([^<]+)?>)","i"+(l.onlyFirst?"":"g"));t.innerHTML=t.innerHTML.replace(e,e=>''+e+"")})})}lang(e,t){if(!e||null==this.settings.phrases||"string"!=typeof e||"<=>=".includes(e))return this.execTemplate(e,t);let i=this.settings.phrases[e];return null==i?(i=e,this.settings.warnNoPhrase&&(this.settings.missing||(this.settings.missing={}),this.settings.missing[e]="---",this.settings.phrases[e]="---",console.log(`Missing translation for "%c${e}%c", see %c w2utils.settings.phrases %c with value "---"`,"color: orange","","color: #999",""))):"---"!==i||this.settings.warnNoPhrase||(i=e),"---"===i&&(i=`---`),this.execTemplate(i,t)}locale(l,i,r){return new Promise((s,t)=>{if(Array.isArray(l)){this.settings.phrases={};let i=[],t={};l.forEach((e,t)=>{5===e.length&&(e="locale/"+e.toLowerCase()+".json",l[t]=e),i.push(this.locale(e,!0,!1))}),void Promise.allSettled(i).then(e=>{e.forEach(e=>{e.value&&(t[e.value.file]=e.value.data)}),l.forEach(e=>{this.settings=this.extend({},this.settings,t[e])}),s()})}else(l=l||"en-us")instanceof Object?this.settings=this.extend({},this.settings,w2locale,l):(5===l.length&&(l="locale/"+l.toLowerCase()+".json"),fetch(l,{method:"GET"}).then(e=>e.json()).then(e=>{!0!==r&&(this.settings=i?this.extend({},this.settings,e):this.extend({},this.settings,w2locale,{phrases:{}},e)),s({file:l,data:e})}).catch(e=>{console.log("ERROR: Cannot load locale "+l),t(e)}))})}scrollBarSize(){return this.tmp.scrollBarSize||(query("body").append(` -
      -
      1
      -
      - `),this.tmp.scrollBarSize=100-query("#_scrollbar_width > div")[0].clientWidth,query("#_scrollbar_width").remove()),this.tmp.scrollBarSize}checkName(e){return null==e?(console.log('ERROR: Property "name" is required but not supplied.'),!1):null!=w2ui[e]?(console.log(`ERROR: Object named "${e}" is already registered as w2ui.${e}.`),!1):!!this.isAlphaNumeric(e)||(console.log('ERROR: Property "name" has to be alpha-numeric (a-z, 0-9, dash and underscore).'),!1)}checkUniqueId(t,i,s,l){Array.isArray(i)||(i=[i]);let r=!0;return i.forEach(e=>{e.id===t&&(console.log(`ERROR: The item id="${t}" is not unique within the ${s} "${l}".`,i),r=!1)}),r}encodeParams(t,i=""){let s="";return Object.keys(t).forEach(e=>{""!=s&&(s+="&"),"object"==typeof t[e]?s+=this.encodeParams(t[e],i+e+(i?"]":"")+"["):s+=""+i+e+(i?"]":"")+"="+t[e]}),s}parseRoute(e){let n=[];e=e.replace(/\/\(/g,"(?:/").replace(/\+/g,"__plus__").replace(/(\/)?(\.)?:(\w+)(?:(\(.*?\)))?(\?)?/g,(e,t,i,s,l,r)=>(n.push({name:s,optional:!!r}),t=t||"",(r?"":t)+"(?:"+(r?t:"")+(i||"")+(l||(i?"([^/.]+?)":"([^/]+?)"))+")"+(r||""))).replace(/([\/.])/g,"\\$1").replace(/__plus__/g,"(.+)").replace(/\*/g,"(.*)");return{path:new RegExp("^"+e+"$","i"),keys:n}}getCursorPosition(e){if(null==e)return null;let t=0;var i,s=e.ownerDocument||e.document,l=s.defaultView||s.parentWindow;let r;return["INPUT","TEXTAREA"].includes(e.tagName)?t=e.selectionStart:l.getSelection?0<(r=l.getSelection()).rangeCount&&((i=(l=r.getRangeAt(0)).cloneRange()).selectNodeContents(e),i.setEnd(l.endContainer,l.endOffset),t=i.toString().length):(r=s.selection)&&"Control"!==r.type&&(l=r.createRange(),(i=s.body.createTextRange()).moveToElementText(e),i.setEndPoint("EndToEnd",l),t=i.text.length),t}setCursorPosition(s,l,t){if(null!=s){var r=document.createRange();let i,e=window.getSelection();if(["INPUT","TEXTAREA"].includes(s.tagName))s.setSelectionRange(l,t??l);else{for(let t=0;t").replace(/&/g,"&").replace(/"/g,'"').replace(/ /g," "):e).length){(i=(i=s.childNodes[t]).childNodes&&0i.length&&(l=i.length),r.setStart(i,l),t?r.setEnd(i,t):r.collapse(!0),e.removeAllRanges(),e.addRange(r))}}}parseColor(e){if("string"!=typeof e)return null;let t={};if(3===(e="#"===(e=e.trim().toUpperCase())[0]?e.substr(1):e).length)t={r:parseInt(e[0]+e[0],16),g:parseInt(e[1]+e[1],16),b:parseInt(e[2]+e[2],16),a:1};else if(6===e.length)t={r:parseInt(e.substr(0,2),16),g:parseInt(e.substr(2,2),16),b:parseInt(e.substr(4,2),16),a:1};else if(8===e.length)t={r:parseInt(e.substr(0,2),16),g:parseInt(e.substr(2,2),16),b:parseInt(e.substr(4,2),16),a:Math.round(parseInt(e.substr(6,2),16)/255*100)/100};else if(4{s[t]=this.clone(e,i)}):this.isPlainObject(e)?(s={},Object.assign(s,e),i.exclude&&i.exclude.forEach(e=>{delete s[e]}),Object.keys(s).forEach(e=>{s[e]=this.clone(s[e],i),void 0===s[e]&&delete s[e]})):e instanceof Function&&!i.functions||e instanceof Node&&!i.elements||e instanceof Event&&!i.events||(s=e),s}extend(i,s){if(Array.isArray(i)){if(!Array.isArray(s))throw new Error("Arrays can be extended with arrays only");i.splice(0,i.length),s.forEach(e=>{i.push(this.clone(e))})}else{if(i instanceof Node||i instanceof Event)throw new Error("HTML elmenents and events cannot be extended");if(i&&"object"==typeof i&&null!=s){if("object"!=typeof s)throw new Error("Object can be extended with other objects only.");Object.keys(s).forEach(e=>{var t;null!=i[e]&&"object"==typeof i[e]&&null!=s[e]&&"object"==typeof s[e]?(t=this.clone(s[e]),i[e]instanceof Node||i[e]instanceof 
Event?i[e]=t:(Array.isArray(i[e])&&this.isPlainObject(t)&&(i[e]={}),this.extend(i[e],t))):i[e]=this.clone(s[e])})}else if(null!=s)throw new Error("Object is not extendable, only {} or [] can be extended.")}if(2{"string"==typeof e||"number"==typeof e?i[t]={id:e,text:String(e)}:null!=e?(null!=e.caption&&null==e.text&&(e.text=e.caption),null!=e.text&&null==e.id&&(e.id=e.text),null==e.text&&null!=e.id&&(e.text=e.id)):i[t]={id:null,text:"null"}}),i):"function"==typeof i?(e=i.call(this,i,e),w2utils.normMenu.call(this,e)):"object"==typeof i?Object.keys(i).map(e=>({id:e,text:i[e]})):void 0}bindEvents(e,r){0!=e.length&&(e?.[0]instanceof Node&&(e=Array.isArray(e)?e:e.get()),query(e).each(s=>{let l=query(s).data();Object.keys(l).forEach(i=>{if(-1!=["click","dblclick","mouseenter","mouseleave","mouseover","mouseout","mousedown","mousemove","mouseup","contextmenu","focus","focusin","focusout","blur","input","change","keydown","keyup","keypress"].indexOf(String(i).toLowerCase())){let e=l[i],t=(e="string"==typeof e?e.split("|").map(e=>{"null"===(e="undefined"===(e="false"===(e="true"===e?!0:e)?!1:e)?void 0:e)&&(e=null);var t=["'",'"',"`"];return e="string"==typeof(e=parseFloat(e)==e?parseFloat(e):e)&&t.includes(e[0])&&t.includes(e[e.length-1])?e.substring(1,e.length-1):e}):e)[0];e=e.slice(1),query(s).off(i+".w2utils-bind").on(i+".w2utils-bind",function(i){switch(t){case"alert":alert(e[0]);break;case"stop":i.stopPropagation();break;case"prevent":i.preventDefault();break;case"stopPrevent":return i.stopPropagation(),i.preventDefault(),!1;default:if(null==r[t])throw new Error(`Cannot dispatch event as the method "${t}" does not exist.`);r[t].apply(r,e.map((e,t)=>{switch(String(e).toLowerCase()){case"event":return i;case"this":return this;default:return e}}))}})}})}))}}var w2utils=new Utils;class Dialog extends w2base{constructor(){super(),this.defaults={title:"",text:"",body:"",buttons:"",width:450,height:250,focus:null,actions:null,style:"",speed:.3,modal:!1,maximized:!1,keyboard:!0,showClose:!0,showMax:!1,transition:null,openMaximized:!1,moved:!1},this.name="popup",this.status="closed",this.onOpen=null,this.onClose=null,this.onMax=null,this.onMin=null,this.onToggle=null,this.onKeydown=null,this.onAction=null,this.onMove=null,this.tmp={},this.handleResize=e=>{this.options.moved||this.center(void 0,void 0,!0)}}open(s){let l=this;"closing"!=this.status&&!query("#w2ui-popup").hasClass("animating")||this.close(!0);var e=this.options;null!=(s=["string","number"].includes(typeof s)?w2utils.extend({title:"Notification",body:`
      ${s}
      `,actions:{Ok(){l.close()}},cancelAction:"ok"},arguments[1]??{}):s).text&&(s.body=`
      ${s.text}
      `),s=Object.assign({},this.defaults,e,{title:"",body:""},s,{maximized:!1}),this.options=s,0===query("#w2ui-popup").length&&(this.off("*"),Object.keys(this).forEach(e=>{e.startsWith("on")&&"on"!=e&&(this[e]=null)})),Object.keys(s).forEach(e=>{e.startsWith("on")&&"on"!=e&&s[e]&&(this[e]=s[e])}),s.width=parseInt(s.width),s.height=parseInt(s.height);let r,t,i;var{top:n,left:a}=this.center();let o={self:this,action(e){return l.on("action.prom",e),o},close(e){return l.on("close.prom",e),o},then(e){return l.on("open:after.prom",e),o}};if(null==s.actions||s.buttons||(s.buttons="",Object.keys(s.actions).forEach(e=>{var t=s.actions[e];let i=e;"function"==typeof t&&(s.buttons+=``),"object"==typeof t&&(s.buttons+=``,i=Array.isArray(s.actions)?t.text:e),"string"==typeof t&&(s.buttons+=``,i=t),"string"==typeof i&&(i=i[0].toLowerCase()+i.substr(1).replace(/\s+/g,"")),o[i]=function(t){return l.on("action.buttons",e=>{e.detail.action[0].toLowerCase()+e.detail.action.substr(1).replace(/\s+/g,"")==i&&t(e)}),o}})),0===query("#w2ui-popup").length){if(!0===(r=this.trigger("open",{target:"popup",present:!1})).isCancelled)return;this.status="opening",w2utils.lock(document.body,{opacity:.3,onClick:s.modal?null:()=>{this.close()}});let e="";s.showClose&&(e+=`
      - -
      `),s.showMax&&(e+=`
      - -
      `);a=` - left: ${a}px; - top: ${n}px; - width: ${parseInt(s.width)}px; - height: ${parseInt(s.height)}px; - transition: ${s.speed}s - `;t=`
      `,query("body").append(t),query("#w2ui-popup")[0]._w2popup={self:this,created:new Promise(e=>{this._promCreated=e}),opened:new Promise(e=>{this._promOpened=e}),closing:new Promise(e=>{this._promClosing=e}),closed:new Promise(e=>{this._promClosed=e})},a=`${s.title?"":"top: 0px !important;"} `+(s.buttons?"":"bottom: 0px !important;"),t=` - -
      ${e}
      -
      -
      -
      -
      -
      - - `,query("#w2ui-popup").html(t),s.title&&query("#w2ui-popup .w2ui-popup-title").append(w2utils.lang(s.title)),s.buttons&&query("#w2ui-popup .w2ui-popup-buttons").append(s.buttons),s.body&&query("#w2ui-popup .w2ui-popup-body").append(s.body),setTimeout(()=>{query("#w2ui-popup").css("transition",s.speed+"s").removeClass("w2ui-anim-open"),w2utils.bindEvents("#w2ui-popup .w2ui-eaction",this),query("#w2ui-popup").find(".w2ui-popup-body").show(),this._promCreated()},1),clearTimeout(this._timer),this._timer=setTimeout(()=>{this.status="open",l.setFocus(s.focus),r.finish(),this._promOpened(),query("#w2ui-popup").removeClass("animating")},1e3*s.speed)}else{if(!0===(r=this.trigger("open",{target:"popup",present:!0})).isCancelled)return;this.status="opening",null!=e&&(e.maximized||e.width==s.width&&e.height==s.height||this.resize(s.width,s.height),s.prevSize=s.width+"px:"+s.height+"px",s.maximized=e.maximized);n=query("#w2ui-popup .w2ui-box").get(0).cloneNode(!0);query(n).removeClass("w2ui-box").addClass("w2ui-box-temp").find(".w2ui-popup-body").empty().append(s.body),query("#w2ui-popup .w2ui-box").after(n),s.buttons?(query("#w2ui-popup .w2ui-popup-buttons").show().html("").append(s.buttons),query("#w2ui-popup .w2ui-popup-body").removeClass("w2ui-popup-no-buttons"),query("#w2ui-popup .w2ui-box, #w2ui-popup .w2ui-box-temp").css("bottom","")):(query("#w2ui-popup .w2ui-popup-buttons").hide().html(""),query("#w2ui-popup .w2ui-popup-body").addClass("w2ui-popup-no-buttons"),query("#w2ui-popup .w2ui-box, #w2ui-popup .w2ui-box-temp").css("bottom","0px")),s.title?(query("#w2ui-popup .w2ui-popup-title").show().html((s.showClose?`
      - -
      `:"")+(s.showMax?`
      - -
      `:"")).append(s.title),query("#w2ui-popup .w2ui-popup-body").removeClass("w2ui-popup-no-title"),query("#w2ui-popup .w2ui-box, #w2ui-popup .w2ui-box-temp").css("top","")):(query("#w2ui-popup .w2ui-popup-title").hide().html(""),query("#w2ui-popup .w2ui-popup-body").addClass("w2ui-popup-no-title"),query("#w2ui-popup .w2ui-box, #w2ui-popup .w2ui-box-temp").css("top","0px"));let t=query("#w2ui-popup .w2ui-box")[0],i=query("#w2ui-popup .w2ui-box-temp")[0];query("#w2ui-popup").addClass("animating"),w2utils.transition(t,i,s.transition,()=>{query(t).remove(),query(i).removeClass("w2ui-box-temp").addClass("w2ui-box");var e=query(i).find(".w2ui-popup-body");1==e.length&&(e[0].style.cssText=s.style,e.show()),l.setFocus(s.focus),query("#w2ui-popup").removeClass("animating")}),this.status="open",r.finish(),w2utils.bindEvents("#w2ui-popup .w2ui-eaction",this),query("#w2ui-popup").find(".w2ui-popup-body").show()}return s.openMaximized&&this.max(),s._last_focus=document.activeElement,s.keyboard&&query(document.body).on("keydown",e=>{this.keydown(e)}),query(window).on("resize",this.handleResize),i={resizing:!1,mvMove:function(e){1==i.resizing&&(e=e||window.event,i.div_x=e.screenX-i.x,i.div_y=e.screenY-i.y,!0!==(e=l.trigger("move",{target:"popup",div_x:i.div_x,div_y:i.div_y,originalEvent:e})).isCancelled&&(query("#w2ui-popup").css({transition:"none",transform:"translate3d("+i.div_x+"px, "+i.div_y+"px, 0px)"}),l.options.moved=!0,e.finish()))},mvStop:function(e){1==i.resizing&&(e=e||window.event,l.status="open",i.div_x=e.screenX-i.x,i.div_y=e.screenY-i.y,query("#w2ui-popup").css({left:i.pos_x+i.div_x+"px",top:i.pos_y+i.div_y+"px"}).css({transition:"none",transform:"translate3d(0px, 0px, 0px)"}),i.resizing=!1,query(document.body).off(".w2ui-popup"),i.isLocked||l.unlock())}},query("#w2ui-popup .w2ui-popup-title").on("mousedown",function(e){var t;l.options.maximized||(e=(e=e)||window.event,l.status="moving",t=query("#w2ui-popup").get(0).getBoundingClientRect(),Object.assign(i,{resizing:!0,isLocked:1==query("#w2ui-popup > .w2ui-lock").length,x:e.screenX,y:e.screenY,pos_x:t.x,pos_y:t.y}),i.isLocked||l.lock({opacity:0}),query(document.body).on("mousemove.w2ui-popup",i.mvMove).on("mouseup.w2ui-popup",i.mvStop),e.stopPropagation?e.stopPropagation():e.cancelBubble=!0,e.preventDefault&&e.preventDefault())}),o}load(s){return new Promise((i,e)=>{if(null==(s="string"==typeof s?{url:s}:s).url)console.log("ERROR: The url is not defined."),e("The url is not defined");else{this.status="loading";let[e,t]=String(s.url).split("#");e&&fetch(e).then(e=>e.text()).then(e=>{i(this.template(e,t,s))})}})}template(t,e,i={}){let s;try{s=query(t)}catch(e){s=query.html(t)}return e&&(s=s.filter("#"+e)),Object.assign(i,{width:parseInt(query(s).css("width")),height:parseInt(query(s).css("height")),title:query(s).find("[rel=title]").html(),body:query(s).find("[rel=body]").html(),buttons:query(s).find("[rel=buttons]").html(),style:query(s).find("[rel=body]").get(0).style.cssText}),this.open(i)}action(e,t){let i=this.options.actions[e];i instanceof Object&&i.onClick&&(i=i.onClick);e=this.trigger("action",{action:e,target:"popup",self:this,originalEvent:t,value:this.input?this.input.value:null});!0!==e.isCancelled&&("function"==typeof i&&i.call(this,t),e.finish())}keydown(e){var t;this.options&&!this.options.keyboard||!0!==(t=this.trigger("keydown",{target:"popup",originalEvent:e})).isCancelled&&(27===e.keyCode&&(e.preventDefault(),0==query("#w2ui-popup 
.w2ui-message").length&&(this.options.cancelAction?this.action(this.options.cancelAction):this.close())),t.finish())}close(e){let t=this.trigger("close",{target:"popup"});var i;!0!==t.isCancelled&&(i=()=>{query("#w2ui-popup").remove(),this.options._last_focus&&0{e.finish()},1e3*this.options.speed+50))}max(){if(!0!==this.options.maximized){let e=this.trigger("max",{target:"popup"});var t;!0!==e.isCancelled&&(this.status="resizing",t=query("#w2ui-popup").get(0).getBoundingClientRect(),this.options.prevSize=t.width+":"+t.height,this.resize(1e4,1e4,()=>{this.status="open",this.options.maximized=!0,e.finish()}))}}min(){if(!0===this.options.maximized){var t=this.options.prevSize.split(":");let e=this.trigger("min",{target:"popup"});!0!==e.isCancelled&&(this.status="resizing",this.options.maximized=!1,this.resize(parseInt(t[0]),parseInt(t[1]),()=>{this.status="open",this.options.prevSize=null,e.finish()}))}}clear(){query("#w2ui-popup .w2ui-popup-title").html(""),query("#w2ui-popup .w2ui-popup-body").html(""),query("#w2ui-popup .w2ui-popup-buttons").html("")}reset(){this.open(this.defaults)}message(e){return w2utils.message({owner:this,box:query("#w2ui-popup").get(0),after:".w2ui-popup-title"},e)}confirm(e){return w2utils.confirm({owner:this,box:query("#w2ui-popup"),after:".w2ui-popup-title"},e)}setFocus(e){let s=query("#w2ui-popup"),l="input, button, select, textarea, [contentEditable], .w2ui-input";null!=e?(isNaN(e)?s.find(l).filter(e).get(0):s.find(l).get(e))?.focus():(e=s.find("[name=hidden-first]").get(0))&&e.focus(),query(s).find(l+",[name=hidden-first],[name=hidden-last]").off(".keep-focus").on("blur.keep-focus",function(e){setTimeout(()=>{var e=document.activeElement,t=0{s.resizeMessages()},10);setTimeout(()=>{clearInterval(a),s.resizeMessages(),"function"==typeof i&&i()},1e3*this.options.speed+50)}resizeMessages(){query("#w2ui-popup .w2ui-message").each(e=>{var t=e._msg_options,i=query("#w2ui-popup"),s=(parseInt(t.width)<10&&(t.width=10),parseInt(t.height)<10&&(t.height=10),i[0].getBoundingClientRect()),i=parseInt(i.find(".w2ui-popup-title")[0].clientHeight),l=parseInt(s.width),s=parseInt(s.height);t.width=t.originalWidth,t.width>l-10&&(t.width=l-10),t.height=t.originalHeight,t.height>s-i-5&&(t.height=s-i-5),t.originalHeight<0&&(t.height=s+t.originalHeight-i),t.originalWidth<0&&(t.width=l+2*t.originalWidth),query(e).css({left:(l-t.width)/2+"px",width:t.width+"px",height:t.height+"px"})})}}function w2alert(e,t,i){let s;t={title:w2utils.lang(t??"Notification"),body:`
      ${e}
      `,showClose:!1,actions:["Ok"],cancelAction:"ok"};return(s=0{"function"==typeof e.detail.self?.close&&e.detail.self.close(),"function"==typeof i&&i()}),s}function w2confirm(e,t,i){let s,l=e;return(l=["string","number"].includes(typeof l)?{msg:l}:l).msg&&(l.body=`
      ${l.msg}
      `,delete l.msg),w2utils.extend(l,{title:w2utils.lang(t??"Confirmation"),showClose:!1,modal:!0,cancelAction:"no"}),w2utils.normButtons(l,{yes:"Yes",no:"No"}),(s=0{"function"==typeof e.detail.self?.close&&e.detail.self.close(),"function"==typeof i&&i(e.detail.action)}),s}function w2prompt(e,t,i){let s,l=e;return(l=["string","number"].includes(typeof l)?{label:l}:l).label&&(l.focus=0,l.body=l.textarea?`
      -
      ${l.label}
      - -
      `:`
      - - -
      `),w2utils.extend(l,{title:w2utils.lang(t??"Notification"),showClose:!1,modal:!0,cancelAction:"cancel"}),w2utils.normButtons(l,{ok:"Ok",cancel:"Cancel"}),(s=0{e=e.detail.box||query("#w2ui-popup .w2ui-popup-body").get(0);w2utils.bindEvents(query(e).find("#w2prompt"),{keydown(e){27==e.keyCode&&e.stopPropagation()},change(e){var t=s.self.trigger("change",{target:"prompt",originalEvent:e});!0!==t.isCancelled&&(13==e.keyCode&&e.ctrlKey&&s.self.action("Ok",e),27==e.keyCode&&s.self.action("Cancel",e),t.finish())}}),query(e).find(".w2ui-eaction").trigger("keyup")}).on("action:after.prompt",e=>{"function"==typeof e.detail.self?.close&&e.detail.self.close(),"function"==typeof i&&i(e.detail.action)}),s}let w2popup=new Dialog;class Tooltip{static active={};constructor(){this.defaults={name:null,html:"",style:"",class:"",position:"top|bottom",align:"",anchor:null,anchorClass:"",anchorStyle:"",autoShow:!1,autoShowOn:null,autoHideOn:null,arrowSize:8,margin:0,margin:1,screenMargin:2,autoResize:!0,offsetX:0,offsetY:0,maxWidth:null,maxHeight:null,watchScroll:null,watchResize:null,hideOn:null,onThen:null,onShow:null,onHide:null,onUpdate:null,onMove:null}}static observeRemove=new MutationObserver(e=>{let t=0;Object.keys(Tooltip.active).forEach(e=>{e=Tooltip.active[e];e.displayed&&(e.anchor&&e.anchor.isConnected?t++:e.hide())}),0===t&&Tooltip.observeRemove.disconnect()});trigger(e,t){var i;if(2==arguments.length&&(i=e,(e=t).type=i),e.overlay)return e.overlay.trigger(e);console.log("ERROR: cannot find overlay where to trigger events")}get(e){return 0==arguments.length?Object.keys(Tooltip.active):!0===e?Tooltip.active:Tooltip.active[e.replace(/[\s\.#]/g,"_")]}attach(t,s){let l,r,n=this;if(0!=arguments.length){1==arguments.length&&t.anchor?t=(l=t).anchor:2===arguments.length&&"string"==typeof s?s=(l={anchor:t,html:s}).html:2===arguments.length&&null!=s&&"object"==typeof s&&(s=(l=s).html),l=w2utils.extend({},this.defaults,l||{}),!(s=!s&&l.text?l.text:s)&&l.html&&(s=l.html),delete l.anchor;let e=l.name||t.id;t!=document&&t!=document.body||(t=document.body,e="context-menu"),e||(e="noname-"+Object.keys(Tooltip.active).length,console.log("NOTICE: name property is not defined for tooltip, could lead to too many instances")),e=e.replace(/[\s\.#]/g,"_"),Tooltip.active[e]?((r=Tooltip.active[e]).prevOptions=r.options,r.options=l,r.anchor=t,r.prevOptions.html==r.options.html&&r.prevOptions.class==r.options.class&&r.prevOptions.style==r.options.style||(r.needsUpdate=!0),l=r.options):(r=new w2base,Object.assign(r,{id:"w2overlay-"+e,name:e,options:l,anchor:t,displayed:!1,tmp:{observeResize:new ResizeObserver(()=>{this.resize(r.name)})},hide(){n.hide(e)}}),Tooltip.active[e]=r),Object.keys(r.options).forEach(e=>{var t=r.options[e];e.startsWith("on")&&"function"==typeof t&&(r[e]=t,delete r.options[e])}),!0===l.autoShow&&(l.autoShowOn=l.autoShowOn??"mouseenter",l.autoHideOn=l.autoHideOn??"mouseleave",l.autoShow=!1),l.autoShowOn&&(s="autoShow-"+r.name,query(t).off("."+s).on(l.autoShowOn+"."+s,e=>{n.show(r.name),e.stopPropagation()}),delete l.autoShowOn),l.autoHideOn&&(s="autoHide-"+r.name,query(t).off("."+s).on(l.autoHideOn+"."+s,e=>{n.hide(r.name),e.stopPropagation()}),delete l.autoHideOn),r.off(".attach");let i={overlay:r,then:t=>(r.on("show:after.attach",e=>{t(e)}),i),show:t=>(r.on("show.attach",e=>{t(e)}),i),hide:t=>(r.on("hide.attach",e=>{t(e)}),i),update:t=>(r.on("update.attach",e=>{t(e)}),i),move:t=>(r.on("move.attach",e=>{t(e)}),i)};return i}}update(e,t){var 
i=Tooltip.active[e];i?(i.needsUpdate=!0,i.options.html=t,this.show(e)):console.log(`Tooltip "${e}" is not displayed. Cannot update it.`)}show(i){if(i instanceof HTMLElement||i instanceof Object){let e=i,t=(i instanceof HTMLElement&&((e=arguments[1]||{}).anchor=i),this.attach(e));return query(t.overlay.anchor).off(".autoShow-"+t.overlay.name).off(".autoHide-"+t.overlay.name),setTimeout(()=>{this.show(t.overlay.name)},1),t}let t,r=this,n=Tooltip.active[i.replace(/[\s\.#]/g,"_")];if(n){let l=n.options;if(!n||n.displayed&&!n.needsUpdate)this.resize(n?.name);else{var s=l.position.split("|"),s=["top","bottom"].includes(s[0]);let e="both"==l.align&&s?"":"white-space: nowrap;";if(l.maxWidth&&w2utils.getStrWidth(l.html,"")>l.maxWidth&&(e="width: "+l.maxWidth+"px; white-space: inherit; overflow: auto;"),e+=" max-height: "+(l.maxHeight||window.innerHeight-40)+"px;",""!==l.html&&null!=l.html){if(n.box){if(!0===(t=this.trigger("update",{target:i,overlay:n})).isCancelled)return void(n.prevOptions&&(n.options=n.prevOptions,delete n.prevOptions));query(n.box).find(".w2ui-overlay-body").attr("style",(l.style||"")+"; "+e).removeClass().addClass("w2ui-overlay-body "+l.class).html(l.html)}else{if(!0===(t=this.trigger("show",{target:i,overlay:n})).isCancelled)return;query("body").append(``),n.box=query("#"+w2utils.escapeId(n.id))[0],n.displayed=!0;s=query(n.anchor).data("tooltipName")??[];s.push(i),query(n.anchor).data("tooltipName",s),w2utils.bindEvents(n.box,{}),n.tmp.originalCSS="",0{r.hide(n.name)},i=query(n.anchor),s="tooltip-"+n.name;query("body").off("."+s),l.hideOn.includes("doc-click")&&(["INPUT","TEXTAREA"].includes(n.anchor.tagName)&&i.off(`.${s}-doc`).on(`click.${s}-doc`,e=>{e.stopPropagation()}),query("body").on("click."+s,t));l.hideOn.includes("focus-change")&&query("body").on("focusin."+s,e=>{document.activeElement!=n.anchor&&r.hide(n.name)});["INPUT","TEXTAREA"].includes(n.anchor.tagName)&&(i.off("."+s),l.hideOn.forEach(e=>{-1==["doc-click","focus-change"].indexOf(e)&&i.on(e+"."+s,{once:!0},t)}))}{var a=document.body;let e="tooltip-"+n.name,t=a;"BODY"==a.tagName&&(t=a.ownerDocument);query(t).off("."+e).on("scroll."+e,e=>{Object.assign(n.tmp,{scrollLeft:a.scrollLeft,scrollTop:a.scrollTop}),r.resize(n.name)})}return query(n.box).show(),n.tmp.observeResize.observe(n.box),Tooltip.observeRemove.observe(document.body,{subtree:!0,childList:!0}),query(n.box).css("opacity",1).find(".w2ui-overlay-body").html(l.html),setTimeout(()=>{query(n.box).css({"pointer-events":"auto"}).data("ready","yes")},100),delete n.needsUpdate,n.box.overlay=n,t&&t.finish(),{overlay:n}}r.hide(i)}}}hide(e){let i;if(0==arguments.length)Object.keys(Tooltip.active).forEach(e=>{this.hide(e)});else if(e instanceof HTMLElement)(query(e).data("tooltipName")??[]).forEach(e=>{this.hide(e)});else if("string"==typeof e&&(e=e.replace(/[\s\.#]/g,"_"),i=Tooltip.active[e]),i&&i.box){delete Tooltip.active[e];e=this.trigger("hide",{target:e,overlay:i});if(!0!==e.isCancelled){var s="tooltip-"+i.name;i.tmp.observeResize?.disconnect(),i.options.watchScroll&&query(i.options.watchScroll).off(".w2scroll-"+i.name);let t=0;Object.keys(Tooltip.active).forEach(e=>{Tooltip.active[e].displayed&&t++}),0==t&&Tooltip.observeRemove.disconnect(),query("body").off("."+s),query(document).off("."+s),i.box.remove(),i.box=null,i.displayed=!1;var 
l=query(i.anchor).data("tooltipName")??[];-1!=l.indexOf(i.name)&&l.splice(l.indexOf(i.name),1),0==l.length?query(i.anchor).removeData("tooltipName"):query(i.anchor).data("tooltipName",l),i.anchor.style.cssText=i.tmp.originalCSS,query(i.anchor).off("."+s).removeClass(i.options.anchorClass),e.finish()}}}resize(i){if(0==arguments.length)Object.keys(Tooltip.active).forEach(e=>{e=Tooltip.active[e];e.displayed&&this.resize(e.name)});else{var s=Tooltip.active[i.replace(/[\s\.#]/g,"_")];let t=this.getPosition(s.name);var l=t.left+"x"+t.top;let e;s.tmp.lastPos!=l&&(e=this.trigger("move",{target:i,overlay:s,pos:t})),query(s.box).css({left:t.left+"px",top:t.top+"px"}).then(e=>{null!=t.width&&e.css("width",t.width+"px").find(".w2ui-overlay-body").css("width","100%"),null!=t.height&&e.css("height",t.height+"px").find(".w2ui-overlay-body").css("height","100%")}).find(".w2ui-overlay-body").removeClass("w2ui-arrow-right w2ui-arrow-left w2ui-arrow-top w2ui-arrow-bottom").addClass(t.arrow.class).closest(".w2ui-overlay").find("style").text(t.arrow.style),s.tmp.lastPos!=l&&e&&(s.tmp.lastPos=l,e.finish())}}getPosition(e){let g=Tooltip.active[e.replace(/[\s\.#]/g,"_")];if(g&&g.box){let t=g.options;(g.tmp.resizedY||g.tmp.resizedX)&&query(g.box).css({width:"",height:"",scroll:"auto"});var e=w2utils.scrollBarSize(),y=!(document.body.scrollWidth==document.body.clientWidth),w=!(document.body.scrollHeight==document.body.clientHeight);let i={width:window.innerWidth-(w?e:0),height:window.innerHeight-(y?e:0)};var b,v=("auto"==t.position?"top|bottom|right|left":t.position).split("|");let s=["top","bottom"].includes(v[0]),l=g.box.getBoundingClientRect(),r=g.anchor.getBoundingClientRect(),n=(g.anchor==document.body&&({x,y:_,width:q,height:C}=t.originalEvent,r={left:x-2,top:_-4,width:q,height:C,arrow:"none"}),t.arrowSize),a=("none"==r.arrow&&(n=0),{top:r.top,bottom:i.height-(r.top+r.height)-+(y?e:0),left:r.left,right:i.width-(r.left+r.width)+(w?e:0)});l.width<22&&(l.width=22),l.height<14&&(l.height=14);let o,h,d,u,c="",p={offset:0,class:"",style:`#${g.id} { --tip-size: ${n}px; }`},f={left:0,top:0},m={posX:"",x:0,posY:"",y:0};v.forEach(e=>{["top","bottom"].includes(e)&&(!c&&l.height+n/1.893m.y&&Object.assign(m,{posY:e,y:a[e]})),["left","right"].includes(e)&&(!c&&l.width+n/1.893m.x&&Object.assign(m,{posX:e,x:a[e]}))}),c=c||(s?m.posY:m.posX),t.autoResize&&(["top","bottom"].includes(c)&&(l.height>a[c]?(u=a[c],g.tmp.resizedY=!0):g.tmp.resizedY=!1),["left","right"].includes(c)&&(l.width>a[c]?(d=a[c],g.tmp.resizedX=!0):g.tmp.resizedX=!1));var x=c;switch(p.class=r.arrow||"w2ui-arrow-"+x,x){case"top":o=r.left+(r.width-(d??l.width))/2,h=r.top-(u??l.height)-n/1.5+1;break;case"bottom":o=r.left+(r.width-(d??l.width))/2,h=r.top+r.height+n/1.25+1;break;case"left":o=r.left-(d??l.width)-n/1.2-1,h=r.top+(r.height-(u??l.height))/2;break;case"right":o=r.left+r.width+n/1.2+1,h=r.top+(r.height-(u??l.height))/2}if(s)"left"==t.align&&(f.left=r.left-o,o=r.left),"right"==t.align&&(f.left=r.left+r.width-(d??l.width)-o,o=r.left+r.width-(d??l.width)),["top","bottom"].includes(c)&&t.align.startsWith("both")&&(b=t.align.split(":")[1]??50,r.width>=b&&(o=r.left,d=r.width)),"top"==t.align&&(f.top=r.top-h,h=r.top),"bottom"==t.align&&(f.top=r.top+r.height-(u??l.height)-h,h=r.top+r.height-(u??l.height)),["left","right"].includes(c)&&t.align.startsWith("both")&&(b=t.align.split(":")[1]??50,r.height>=b&&(h=r.top,u=r.height));{let e;(["left","right"].includes(t.align)&&r.width<(d??l.width)||["top","bottom"].includes(t.align)&&r.height<(u??l.height))&&(e=!0);var 
_="right"==c?n:t.screenMargin,q="bottom"==c?n:t.screenMargin,C=i.width-(d??l.width)-("left"==c?n:t.screenMargin),y=i.height-(u??l.height)-("top"==c?n:t.screenMargin)+3;(["top","bottom"].includes(c)||t.autoResize)&&(o<_&&(e=!0,f.left-=o,o=_),o>C&&(e=!0,f.left-=o-C,o+=C-o));(["left","right"].includes(c)||t.autoResize)&&(hy&&(e=!0,f.top-=h-y,h+=y-h));e&&(_=s?"left":"top",C=s?"width":"height",p.offset=-f[_],q=l[C]/2-n,Math.abs(p.offset)>q+n&&(p.class=""),Math.abs(p.offset)>q&&(p.offset=p.offset<0?-q:q),p.style=w2utils.stripSpaces(`#${g.id} .w2ui-overlay-body:after, - #${g.id} .w2ui-overlay-body:before { - --tip-size: ${n}px; - margin-${_}: ${p.offset}px; - }`))}w="top"==c?-t.margin:"bottom"==c?t.margin:0,e="left"==c?-t.margin:"right"==c?t.margin:0;return h=Math.floor(100*(h+parseFloat(t.offsetY)+parseFloat(w)))/100,{left:o=Math.floor(100*(o+parseFloat(t.offsetX)+parseFloat(e)))/100,top:h,arrow:p,adjust:f,width:d,height:u,pos:c}}}}class ColorTooltip extends Tooltip{constructor(){super(),this.palette=[["000000","333333","555555","777777","888888","999999","AAAAAA","CCCCCC","DDDDDD","EEEEEE","F7F7F7","FFFFFF"],["FF011B","FF9838","FFC300","FFFD59","86FF14","14FF7A","2EFFFC","2693FF","006CE7","9B24F4","FF21F5","FF0099"],["FFEAEA","FCEFE1","FCF4DC","FFFECF","EBFFD9","D9FFE9","E0FFFF","E8F4FF","ECF4FC","EAE6F4","FFF5FE","FCF0F7"],["F4CCCC","FCE5CD","FFF1C2","FFFDA1","D5FCB1","B5F7D0","BFFFFF","D6ECFF","CFE2F3","D9D1E9","FFE3FD","FFD9F0"],["EA9899","F9CB9C","FFE48C","F7F56F","B9F77E","84F0B1","83F7F7","B5DAFF","9FC5E8","B4A7D6","FAB9F6","FFADDE"],["E06666","F6B26B","DEB737","E0DE51","8FDB48","52D189","4EDEDB","76ACE3","6FA8DC","8E7CC3","E07EDA","F26DBD"],["CC0814","E69138","AB8816","B5B20E","6BAB30","27A85F","1BA8A6","3C81C7","3D85C6","674EA7","A14F9D","BF4990"],["99050C","B45F17","80650E","737103","395E14","10783D","13615E","094785","0A5394","351C75","780172","782C5A"]],this.defaults=w2utils.extend({},this.defaults,{advanced:!1,transparent:!0,position:"top|bottom",class:"w2ui-white",color:"",liveUpdate:!0,arrowSize:12,autoResize:!1,anchorClass:"w2ui-focus",autoShowOn:"focus",hideOn:["doc-click","focus-change"],onSelect:null,onLiveUpdate:null})}attach(e,t){let i;1==arguments.length&&e.anchor?e=(i=e).anchor:2===arguments.length&&null!=t&&"object"==typeof t&&((i=t).anchor=e);t=i.hideOn;i=w2utils.extend({},this.defaults,i||{}),t&&(i.hideOn=t),i.style+="; padding: 0;",i.transparent&&"333333"==this.palette[0][1]&&(this.palette[0].splice(1,1),this.palette[0].push("")),i.transparent||"333333"==this.palette[0][1]||(this.palette[0].splice(1,0,"333333"),this.palette[0].pop()),i.color&&(i.color=String(i.color).toUpperCase()),"string"==typeof i.color&&"#"===i.color.substr(0,1)&&(i.color=i.color.substr(1)),this.index=[-1,-1];let s=super.attach(i),l=s.overlay;return l.options.html=this.getColorHTML(l.name,i),l.on("show.attach",e=>{var e=e.detail.overlay,t=e.anchor,i=e.options;["INPUT","TEXTAREA"].includes(t.tagName)&&!i.color&&t.value&&(e.tmp.initColor=t.value),delete e.newColor}),l.on("show:after.attach",e=>{var t;s.overlay?.box&&(t=query(s.overlay.box).find(".w2ui-eaction"),w2utils.bindEvents(t,this),this.initControls(s.overlay))}),l.on("update:after.attach",e=>{var t;s.overlay?.box&&(t=query(s.overlay.box).find(".w2ui-eaction"),w2utils.bindEvents(t,this),this.initControls(s.overlay))}),l.on("hide.attach",e=>{var 
e=e.detail.overlay,t=e.anchor,i=e.newColor??e.options.color??"",t=(["INPUT","TEXTAREA"].includes(t.tagName)&&t.value!=i&&(t.value=i),this.trigger("select",{color:i,target:e.name,overlay:e}));!0!==t.isCancelled&&t.finish()}),s.liveUpdate=t=>(l.on("liveUpdate.attach",e=>{t(e)}),s),s.select=t=>(l.on("select.attach",e=>{t(e)}),s),s}select(e,t){let i;this.index=[-1,-1],"string"!=typeof t&&(i=t.target,this.index=query(i).attr("index").split(":"),t=query(i).closest(".w2ui-overlay").attr("name"));var s=this.get(t),t=this.trigger("liveUpdate",{color:e,target:t,overlay:s,param:arguments[1]});!0!==t.isCancelled&&(["INPUT","TEXTAREA"].includes(s.anchor.tagName)&&s.options.liveUpdate&&query(s.anchor).val(e),s.newColor=e,query(s.box).find(".w2ui-selected").removeClass("w2ui-selected"),i&&query(i).addClass("w2ui-selected"),t.finish())}nextColor(e){var t=this.palette;switch(e){case"up":this.index[0]--;break;case"down":this.index[0]++;break;case"right":this.index[1]++;break;case"left":this.index[1]--}return this.index[0]<0&&(this.index[0]=0),this.index[0]>t.length-2&&(this.index[0]=t.length-2),this.index[1]<0&&(this.index[1]=0),this.index[1]>t[0].length-1&&(this.index[1]=t[0].length-1),t[this.index[0]][this.index[1]]}tabClick(e,t){"string"!=typeof t&&(t=query(t.target).closest(".w2ui-overlay").attr("name"));var t=this.get(t),i=query(t.box).find(`.w2ui-color-tab:nth-child(${e})`);query(t.box).find(".w2ui-color-tab").removeClass("w2ui-selected"),query(i).addClass("w2ui-selected"),query(t.box).find(".w2ui-tab-content").hide().closest(".w2ui-colors").find(".tab-"+e).show()}getColorHTML(s,l){let r=` -
      -
      `;for(let i=0;i';for(let t=0;t  -
      `}r+="
      ",i<2&&(r+='
      ')}return r=(r=(r+="
      ")+` - `)+` -
      -
      -
      -
      - ${"string"==typeof l.html?l.html:""} -
      -
      `}initControls(a){let n,o=this;var e=a.options;let h=w2utils.parseColor(e.color||a.tmp.initColor),d=(null==h&&(h={r:140,g:150,b:160,a:1}),w2utils.rgb2hsv(h));!0===e.advanced&&this.tabClick(2,a.name),u(d,!0,!0),query(a.box).find("input").off(".w2color").on("change.w2color",e=>{e=query(e.target);let t=parseFloat(e.val());var i=parseFloat(e.attr("max")),i=(isNaN(t)&&(t=0,e.val(0)),1i&&(e.val(i),t=i),t<0&&(e.val(0),t=0),e.attr("name")),e={};-1!==["r","g","b","a"].indexOf(i)?(h[i]=t,d=w2utils.rgb2hsv(h)):-1!==["h","s","v"].indexOf(i)&&(e[i]=t),u(e,!0)}),query(a.box).find(".color-original").off(".w2color").on("click.w2color",e=>{e=w2utils.parseColor(query(e.target).css("background-color"));null!=e&&(h=e,u(d=w2utils.rgb2hsv(h),!0))});e=`${w2utils.isIOS?"touchstart":"mousedown"}.w2color`;let s=`${w2utils.isIOS?"touchend":"mouseup"}.w2color`,l=`${w2utils.isIOS?"touchmove":"mousemove"}.w2color`;function u(e,t,i){null!=e.h&&(d.h=e.h),null!=e.s&&(d.s=e.s),null!=e.v&&(d.v=e.v),null!=e.a&&(h.a=e.a,d.a=e.a);let s="rgba("+(h=w2utils.hsv2rgb(d)).r+","+h.g+","+h.b+","+h.a+")",l=[Number(h.r).toString(16).toUpperCase(),Number(h.g).toString(16).toUpperCase(),Number(h.b).toString(16).toUpperCase(),Math.round(255*Number(h.a)).toString(16).toUpperCase()];var r,n;l.forEach((e,t)=>{1===e.length&&(l[t]="0"+e)}),s=l[0]+l[1]+l[2]+l[3],1===h.a&&(s=l[0]+l[1]+l[2]),query(a.box).find(".color-preview").css("background-color","#"+s),query(a.box).find("input").each(e=>{e.name&&(null!=h[e.name]&&(e.value=h[e.name]),null!=d[e.name]&&(e.value=d[e.name]),"a"===e.name&&(e.value=h.a))}),i?(e=a.tmp?.initColor||s,query(a.box).find(".color-original").css("background-color","#"+e),query(a.box).find(".w2ui-colors .w2ui-selected").removeClass("w2ui-selected"),query(a.box).find(`.w2ui-colors [name="${e}"]`).addClass("w2ui-selected"),8==s.length&&o.tabClick(2,a.name)):o.select(s,a.name),t&&(i=query(a.box).find(".palette .value1"),e=query(a.box).find(".rainbow .value2"),t=query(a.box).find(".alpha .value2"),r=parseInt(i[0].clientWidth)/2,n=parseInt(e[0].clientWidth)/2,i.css({left:150*d.s/100-r+"px",top:125*(100-d.v)/100-r+"px"}),e.css("left",d.h/2.4-n+"px"),t.css("left",150*h.a-n+"px"),c())}function c(){var e=w2utils.hsv2rgb(d.h,100,100),e=`${e.r},${e.g},`+e.b;query(a.box).find(".palette").css("background-image",`linear-gradient(90deg, rgba(${e},0) 0%, rgba(${e},1) 100%)`)}function r(e){query("body").off(".w2color")}function p(e){var t=n.el,i=e.pageX-n.x,e=e.pageY-n.y;let s=n.left+i,l=n.top+e;var i=parseInt(t.prop("clientWidth"))/2,e=(s<-i&&(s=-i),l<-i&&(l=-i),s>n.width-i&&(s=n.width-i),l>n.height-i&&(l=n.height-i),t.hasClass("move-x")&&t.css({left:s+"px"}),t.hasClass("move-y")&&t.css({top:l+"px"}),query(t.get(0).parentNode).attr("name")),r=parseInt(t.css("left"))+i,t=parseInt(t.css("top"))+i;"palette"===e&&u({s:Math.round(r/n.width*100),v:Math.round(100-t/n.height*100)}),"rainbow"===e&&(u({h:Math.round(2.4*r)}),c()),"alpha"===e&&u({a:parseFloat(Number(r/150).toFixed(2))})}query(a.box).find(".palette, .rainbow, .alpha").off(".w2color").on(e+".w2color",function(e){var t=query(this).find(".value1, .value2"),i=parseInt(t.prop("clientWidth"))/2;t.hasClass("move-x")&&t.css({left:e.offsetX-i+"px"});t.hasClass("move-y")&&t.css({top:e.offsetY-i+"px"});n={el:t,x:e.pageX,y:e.pageY,width:t.prop("parentNode").clientWidth,height:t.prop("parentNode").clientHeight,left:parseInt(t.css("left")),top:parseInt(t.css("top"))},p(e),query("body").off(".w2color").on(l,p).on(s,r)})}}class MenuTooltip extends 
Tooltip{constructor(){super(),this.defaults=w2utils.extend({},this.defaults,{type:"normal",items:[],index:null,render:null,spinner:!1,msgNoItems:w2utils.lang("No items found"),topHTML:"",menuStyle:"",filter:!1,markSearch:!1,match:"contains",search:!1,altRows:!1,arrowSize:10,align:"left",position:"bottom|top",class:"w2ui-white",anchorClass:"w2ui-focus",autoShowOn:"focus",hideOn:["doc-click","focus-change","select"],onSelect:null,onSubMenu:null,onRemove:null})}attach(e,t){let i;1==arguments.length&&e.anchor?e=(i=e).anchor:2===arguments.length&&null!=t&&"object"==typeof t&&((i=t).anchor=e);t=i.hideOn;i=w2utils.extend({},this.defaults,i||{}),t&&(i.hideOn=t),i.style+="; padding: 0;",null==i.items&&(i.items=[]),i.html=this.getMenuHTML(i);let s=super.attach(i),l=s.overlay;return l.on("show:after.attach, update:after.attach",e=>{if(s.overlay?.box){let e="";l.selected=null,l.options.items=w2utils.normMenu(l.options.items),["INPUT","TEXTAREA"].includes(l.anchor.tagName)&&(e=l.anchor.value,l.selected=l.anchor.dataset.selectedIndex);var t=query(s.overlay.box).find(".w2ui-eaction"),t=(w2utils.bindEvents(t,this),this.applyFilter(l.name,null,e));l.tmp.searchCount=t,l.tmp.search=e,this.refreshSearch(l.name),this.initControls(s.overlay),this.refreshIndex(l.name)}}),l.on("hide:after.attach",e=>{w2tooltip.hide(l.name+"-tooltip")}),s.select=t=>(l.on("select.attach",e=>{t(e)}),s),s.remove=t=>(l.on("remove.attach",e=>{t(e)}),s),s.subMenu=t=>(l.on("subMenu.attach",e=>{t(e)}),s),s}update(e,t){var i,s=Tooltip.active[e];s?((i=s.options).items!=t&&(i.items=t),t=this.getMenuHTML(i),i.html!=t&&(i.html=t,s.needsUpdate=!0,this.show(e))):console.log(`Tooltip "${e}" is not displayed. Cannot update it.`)}initControls(i){query(i.box).find(".w2ui-menu:not(.w2ui-sub-menu)").off(".w2menu").on("mouseDown.w2menu",{delegate:".w2ui-menu-item"},e=>{var t=e.delegate.dataset;this.menuDown(i,e,t.index,t.parents)}).on((w2utils.isIOS?"touchStart":"click")+".w2menu",{delegate:".w2ui-menu-item"},e=>{var t=e.delegate.dataset;this.menuClick(i,e,parseInt(t.index),t.parents)}).find(".w2ui-menu-item").off(".w2menu").on("mouseEnter.w2menu",e=>{var t=e.target.dataset,t=i.options.items[t.index]?.tooltip;t&&w2tooltip.show({name:i.name+"-tooltip",anchor:e.target,html:t,position:"right|left",hideOn:["doc-click"]})}).on("mouseLeave.w2menu",e=>{w2tooltip.hide(i.name+"-tooltip")}),["INPUT","TEXTAREA"].includes(i.anchor.tagName)&&query(i.anchor).off(".w2menu").on("input.w2menu",e=>{}).on("keyup.w2menu",e=>{e._searchType="filter",this.keyUp(i,e)}),i.options.search&&query(i.box).find("#menu-search").off(".w2menu").on("keyup.w2menu",e=>{e._searchType="search",this.keyUp(i,e)})}getCurrent(e,t){var e=Tooltip.active[e.replace(/[\s\.#]/g,"_")],i=e.options;let s=(t||(e.selected??"")).split("-");var t=s.length-1,e=s[t],l=s.slice(0,s.length-1).join("-"),e=w2utils.isInt(e)?parseInt(e):0;let r=i.items;return s.forEach((e,t)=>{t -
      -
      - ${w2utils.lang("Loading...")} -
      -
      `;u=u||[],null==e&&(e=h.items),Array.isArray(e)||(e=[]);let c=0,t=null,i="",p=(!d&&h.search&&(i+=` - `,e.forEach(e=>e.hidden=!1)),!d&&h.topHTML&&(i+=`
      ${h.topHTML}
      `),` - ${i} -
      - `);return e.forEach((r,n)=>{t=r.icon;var a=(0`),s=``),"break"!==r.type&&null!=i&&""!==i&&"--"!=String(i).substr(0,2)){var o=["w2ui-menu-item"];1==h.altRows&&o.push(c%2==0?"w2ui-even":"w2ui-odd");let e=1,t=(""===s&&e++,null==r.count&&null==r.hotkey&&!0!==r.remove&&null==r.items&&e++,null==r.tooltip&&null!=r.hint&&(r.tooltip=r.hint),"");if(!0===r.remove)t='x';else if(null!=r.items){let e=[];"function"==typeof r.items?e=r.items(r):Array.isArray(r.items)&&(e=r.items),t="",l=` -
      - ${this.getMenuHTML(h,e,!0,u.concat(n))} -
      `}else null!=r.count&&(t+=""+r.count+""),null!=r.hotkey&&(t+=''+r.hotkey+"");!0===r.disabled&&o.push("w2ui-disabled"),!0===r._noSearchInside&&o.push("w2ui-no-search-inside"),""!==l&&(o.push("has-sub-menu"),r.expanded?o.push("expanded"):o.push("collapsed")),p+=` -
      -
      - ${s} - - -
      - `+l,c++}else{o=(i??"").replace(/^-+/g,"");p+=` -
      -
      - ${o?`
      ${o}
      `:""} -
      `}}e[n]=r}),0===c&&h.msgNoItems&&(p+=` -
      - ${w2utils.lang(h.msgNoItems)} -
      `),p+="
      "}refreshIndex(e){var t,i,e=Tooltip.active[e.replace(/[\s\.#]/g,"_")];e&&(e.displayed||this.show(e.name),t=query(e.box).find(".w2ui-overlay-body").get(0),i=query(e.box).find(".w2ui-menu-search, .w2ui-menu-top").get(0),query(e.box).find(".w2ui-menu-item.w2ui-selected").removeClass("w2ui-selected"),(e=query(e.box).find(`.w2ui-menu-item[index="${e.selected}"]`).addClass("w2ui-selected").get(0))&&(e.offsetTop+e.clientHeight>t.clientHeight+t.scrollTop&&e.scrollIntoView({behavior:"smooth",block:"start",inline:"start"}),e.offsetTop{var t;this.getCurrent(i,e.getAttribute("index")).item.hidden?query(e).hide():((t=s.tmp?.search)&&s.options.markSearch&&w2utils.marker(e,t,{onlyFirst:"begins"==s.options.match}),query(e).show())}),query(s.box).find(".w2ui-sub-menu").each(e=>{var t=query(e).find(".w2ui-menu-item").get().some(e=>"none"!=e.style.display);this.getCurrent(i,e.dataset.parent).item.expanded&&(t?query(e).parent().show():query(e).parent().hide())}),0!=s.tmp.searchCount&&0!=s.options?.items.length||(0==query(s.box).find(".w2ui-no-items").length&&query(s.box).find(".w2ui-menu:not(.w2ui-sub-menu)").append(` -
      - ${w2utils.lang(s.options.msgNoItems)} -
      `),query(s.box).find(".w2ui-no-items").show()))}applyFilter(r,e,n){let a=0;var t=Tooltip.active[r.replace(/[\s\.#]/g,"_")];let o=t.options;if(!1!==o.filter){null==e&&(e=t.options.items),null==n&&(n=["INPUT","TEXTAREA"].includes(t.anchor.tagName)?t.anchor.value:"");let l=[];return o.selected&&(Array.isArray(o.selected)?l=o.selected.map(e=>e?.id??e):o.selected?.id&&(l=[o.selected.id])),e.forEach(e=>{let t="",i="";-1!==["is","begins","begins with"].indexOf(o.match)&&(t="^"),-1!==["is","ends","ends with"].indexOf(o.match)&&(i="$");try{new RegExp(t+n+i,"i").test(e.text)||"..."===e.text?e.hidden=!1:e.hidden=!0}catch(e){}var s;o.hideSelected&&l.includes(e.id)&&(e.hidden=!0),Array.isArray(e.items)&&0{e.hidden||e.disabled||e?.text.startsWith("--")||(l.push(s.concat([t]).join("-")),Array.isArray(e.items)&&0{l=l[e].items}),l[i]);if(!a.disabled){let l=(i,s)=>{i.forEach((e,t)=>{e.id!=a.id&&(e.group===a.group&&e.checked&&(n.find(`.w2ui-menu-item[index="${(s?s+"-":"")+t}"] .w2ui-icon`).removeClass("w2ui-icon-check").addClass("w2ui-icon-empty"),i[t].checked=!1),Array.isArray(e.items)&&l(e.items,t))})};"check"!==e.type&&"radio"!==e.type||!1===a.group||query(t.target).hasClass("remove")||query(t.target).closest(".w2ui-menu-item").hasClass("has-sub-menu")||(a.checked="radio"==e.type||!a.checked,a.checked?("radio"===e.type&&query(t.target).closest(".w2ui-menu").find(".w2ui-icon").removeClass("w2ui-icon-check").addClass("w2ui-icon-empty"),"check"===e.type&&null!=a.group&&l(e.items),r.removeClass("w2ui-icon-empty").addClass("w2ui-icon-check")):"check"===e.type&&r.removeClass("w2ui-icon-check").addClass("w2ui-icon-empty")),query(t.target).hasClass("remove")||(n.find(".w2ui-menu-item").removeClass("w2ui-selected"),query(t.delegate).addClass("w2ui-selected"))}}menuClick(t,i,s,l){var r=t.options;let n=r.items;var a=query(i.delegate).closest(".w2ui-menu-item");let o=!r.hideOn.includes("select");(i.shiftKey||i.metaKey||i.ctrlKey)&&(o=!0),"string"==typeof l&&""!==l?l.split("-").forEach(e=>{n=n[e].items}):l=null;var h=(n="function"==typeof n?n({overlay:t,index:s,parentIndex:l,event:i}):n)[s];if(!h.disabled||query(i.target).hasClass("remove")){let e;if(query(i.target).hasClass("remove")){if(!0===(e=this.trigger("remove",{originalEvent:i,target:t.name,overlay:t,item:h,index:s,parentIndex:l,el:a[0]})).isCancelled)return;o=!r.hideOn.includes("item-remove"),a.remove()}else if(a.hasClass("has-sub-menu")){if(!0===(e=this.trigger("subMenu",{originalEvent:i,target:t.name,overlay:t,item:h,index:s,parentIndex:l,el:a[0]})).isCancelled)return;o=!0,a.hasClass("expanded")?(h.expanded=!1,a.removeClass("expanded").addClass("collapsed"),query(a.get(0).nextElementSibling).hide()):(h.expanded=!0,a.addClass("expanded").removeClass("collapsed"),query(a.get(0).nextElementSibling).show()),t.selected=parseInt(a.attr("index"))}else{r=this.findChecked(r.items);if(t.selected=parseInt(a.attr("index")),!0===(e=this.trigger("select",{originalEvent:i,target:t.name,overlay:t,item:h,index:s,parentIndex:l,selected:r,keepOpen:o,el:a[0]})).isCancelled)return;null!=h.keepOpen&&(o=h.keepOpen),["INPUT","TEXTAREA"].includes(t.anchor.tagName)&&(t.anchor.dataset.selected=h.id,t.anchor.dataset.selectedIndex=t.selected)}o||this.hide(t.name),e.finish()}}findChecked(e){let t=[];return e.forEach(e=>{e.checked&&t.push(e),Array.isArray(e.items)&&(t=t.concat(this.findChecked(e.items)))}),t}keyUp(s,l){var e,r=s.options,t=l.target.value;let n=!0,a=!1;switch(l.keyCode){case 8:""!==t||s.displayed||(n=!1);break;case 
13:if(!s.displayed||!s.selected)return;var{index:i,parents:o}=this.getCurrent(s.name);l.delegate=query(s.box).find(".w2ui-selected").get(0),this.menuClick(s,l,parseInt(i),o),n=!1;break;case 27:n=!1,s.displayed?this.hide(s.name):(i=s.anchor,["INPUT","TEXTAREA"].includes(i.tagName)&&(i.value="",delete i.dataset.selected,delete i.dataset.selectedIndex));break;case 37:{if(!s.displayed)return;let{item:e,index:t,parents:i}=this.getCurrent(s.name);i&&(e=r.items[i],t=parseInt(i),i="",a=!0),Array.isArray(e?.items)&&0{var e=e.detail.overlay,t=e.anchor,i=e.options;["INPUT","TEXTAREA"].includes(t.tagName)&&!i.value&&t.value&&(e.tmp.initValue=t.value),delete e.newValue,delete e.newDate}),l.on("show:after.attach",e=>{s.overlay?.box&&this.initControls(s.overlay)}),l.on("update:after.attach",e=>{s.overlay?.box&&this.initControls(s.overlay)}),l.on("hide.attach",e=>{var e=e.detail.overlay,t=e.anchor;null!=e.newValue&&(e.newDate&&(e.newValue=e.newDate+" "+e.newValue),["INPUT","TEXTAREA"].includes(t.tagName)&&t.value!=e.newValue&&(t.value=e.newValue),!0!==(t=this.trigger("select",{date:e.newValue,target:e.name,overlay:e})).isCancelled&&t.finish())}),s.select=t=>(l.on("select.attach",e=>{t(e)}),s),s}initControls(l){let r=l.options,t=e=>{let{month:t,year:i}=l.tmp;12<(t+=e)&&(t=1,i++),t<1&&(t=12,i--);e=this.getMonthHTML(r,t,i);Object.assign(l.tmp,e),query(l.box).find(".w2ui-overlay-body").html(e.html),this.initControls(l)},i=(e,t)=>{query(e.target).parent().find(".w2ui-jump-month, .w2ui-jump-year").removeClass("w2ui-selected"),query(e.target).addClass("w2ui-selected");e=new Date;let{jumpMonth:i,jumpYear:s}=l.tmp;t&&(null==s&&(s=e.getFullYear()),null==i&&(i=e.getMonth()+1)),i&&s&&(t=this.getMonthHTML(r,i,s),Object.assign(l.tmp,t),query(l.box).find(".w2ui-overlay-body").html(t.html),l.tmp.jump=!1,this.initControls(l))};query(l.box).find(".w2ui-cal-title").off(".calendar").on("click.calendar",e=>{var t,i;Object.assign(l.tmp,{jumpYear:null,jumpMonth:null}),l.tmp.jump?({month:t,year:i}=l.tmp,t=this.getMonthHTML(r,t,i),query(l.box).find(".w2ui-overlay-body").html(t.html),l.tmp.jump=!1):(query(l.box).find(".w2ui-overlay-body .w2ui-cal-days").replace(this.getYearHTML()),(i=query(l.box).find(`[name="${l.tmp.year}"]`).get(0))&&i.scrollIntoView(!0),l.tmp.jump=!0),this.initControls(l),e.stopPropagation()}).find(".w2ui-cal-previous").off(".calendar").on("click.calendar",e=>{t(-1),e.stopPropagation()}).parent().find(".w2ui-cal-next").off(".calendar").on("click.calendar",e=>{t(1),e.stopPropagation()}),query(l.box).find(".w2ui-cal-now").off(".calendar").on("click.calendar",e=>{"datetime"==r.type?l.newDate?l.newValue=w2utils.formatTime(new Date,r.format.split("|")[1]):l.newValue=w2utils.formatDateTime(new Date,r.format):"date"==r.type?l.newValue=w2utils.formatDate(new Date,r.format):"time"==r.type&&(l.newValue=w2utils.formatTime(new 
Date,r.format)),this.hide(l.name)}),query(l.box).off(".calendar").on("click.calendar",{delegate:".w2ui-day.w2ui-date"},e=>{"datetime"==r.type?(l.newDate=query(e.target).attr("date"),query(l.box).find(".w2ui-overlay-body").html(this.getHourHTML(l.options).html),this.initControls(l)):(l.newValue=query(e.target).attr("date"),this.hide(l.name))}).on("click.calendar",{delegate:".w2ui-jump-month"},e=>{l.tmp.jumpMonth=parseInt(query(e.target).attr("name")),i(e)}).on("dblclick.calendar",{delegate:".w2ui-jump-month"},e=>{l.tmp.jumpMonth=parseInt(query(e.target).attr("name")),i(e,!0)}).on("click.calendar",{delegate:".w2ui-jump-year"},e=>{l.tmp.jumpYear=parseInt(query(e.target).attr("name")),i(e)}).on("dblclick.calendar",{delegate:".w2ui-jump-year"},e=>{l.tmp.jumpYear=parseInt(query(e.target).attr("name")),i(e,!0)}).on("click.calendar",{delegate:".w2ui-time.hour"},e=>{var e=query(e.target).attr("hour");let t=this.str2min(r.value)%60;l.tmp.initValue&&!r.value&&(t=this.str2min(l.tmp.initValue)%60),r.noMinutes?(l.newValue=this.min2str(60*e,r.format),this.hide(l.name)):(l.newValue=e+":"+t,e=this.getMinHTML(e,r).html,query(l.box).find(".w2ui-overlay-body").html(e),this.initControls(l))}).on("click.calendar",{delegate:".w2ui-time.min"},e=>{e=60*Math.floor(this.str2min(l.newValue)/60)+parseInt(query(e.target).attr("min"));l.newValue=this.min2str(e,r.format),this.hide(l.name)})}getMonthHTML(l,r,e){var t=w2utils.settings.fulldays.slice(),i=w2utils.settings.shortdays.slice();"M"!==w2utils.settings.weekStarts&&(t.unshift(t.pop()),i.unshift(i.pop()));let s=new Date;var t="datetime"===l.type?w2utils.isDateTime(l.value,l.format,!0):w2utils.isDate(l.value,l.format,!0),n=w2utils.formatDate(t);null!=r&&null!=e||(e=(t||s).getFullYear(),r=t?t.getMonth()+1:s.getMonth()+1),12${i[e]}`}let c=` -
      -
      -
      -
      -
      -
      -
      - ${w2utils.settings.fullmonths[r-1]}, ${e} - -
      -
      - ${o} - `,p=new Date(e+`/${r}/1`);t=p.getDay();"M"==w2utils.settings.weekStarts&&a--,0 - ${g} -
      `,p=new Date(p.getTime()+864e5)}return c+="",l.btnNow&&(t=w2utils.lang("Today"+("datetime"==l.type?" & Now":"")),c+=`
      ${t}
      `),{html:c,month:r,year:e}}getYearHTML(){let t="",i="";for(let e=0;e${w2utils.settings.shortmonths[e]}`;for(let e=w2utils.settings.dateStartYear;e<=w2utils.settings.dateEndYear;e++)i+=`
      ${e}
      `;return`
      -
      ${t}
      -
      ${i}
      -
      `}getHourHTML(l){(l=l??{}).format||(l.format=w2utils.settings.timeFormat);var r=-1${e}`}return{html:`
      -
      ${w2utils.lang("Select Hour")}
      -
      -
      ${a[0]}
      -
      ${a[1]}
      -
      ${a[2]}
      -
      - ${l.btnNow?`
      ${w2utils.lang("Now")}
      `:""} -
      `}}getMinHTML(i,s){null==i&&(i=0),(s=s??{}).format||(s.format=w2utils.settings.timeFormat);var l=-1${a}`}return{html:`
      -
      ${w2utils.lang("Select Minute")}
      -
      -
      ${n[0]}
      -
      ${n[1]}
      -
      ${n[2]}
      -
      - ${s.btnNow?`
      ${w2utils.lang("Now")}
      `:""} -
      `}}inRange(i,s,e){let l=!1;if("date"===s.type){var r=w2utils.isDate(i,s.format,!0);if(r){if(s.start||s.end){var n="string"==typeof s.start?s.start:query(s.start).val(),a="string"==typeof s.end?s.end:query(s.end).val();let e=w2utils.isDate(n,s.format,!0),t=w2utils.isDate(a,s.format,!0);n=new Date(r);e=e||n,t=t||n,n>=e&&n<=t&&(l=!0)}else l=!0;Array.isArray(s.blockDates)&&s.blockDates.includes(i)&&(l=!1),Array.isArray(s.blockWeekdays)&&s.blockWeekdays.includes(r.getDay())&&(l=!1)}}else if("time"===s.type)if(s.start||s.end){a=this.str2min(i);let e=this.str2min(s.start),t=this.str2min(s.end);e=e||a,t=t||a,a>=e&&a<=t&&(l=!0)}else l=!0;else"datetime"!==s.type||(n=w2utils.isDateTime(i,s.format,!0))&&(r=s.format.split("|").map(e=>e.trim()),e?(a=w2utils.formatDate(n,r[0]),i=w2utils.extend({},s,{type:"date",format:r[0]}),this.inRange(a,i)&&(l=!0)):(e=w2utils.formatTime(n,r[1]),a={type:"time",format:r[1],start:s.startTime,end:s.endTime},this.inRange(e,a)&&(l=!0)));return l}str2min(e){var t;return"string"!=typeof e||2!==(t=e.split(":")).length?null:(t[0]=parseInt(t[0]),t[1]=parseInt(t[1]),-1!==e.indexOf("pm")&&12!==t[0]&&(t[0]+=12),e.includes("am")&&12==t[0]&&(t[0]=0),60*t[0]+t[1])}min2str(e,t){let i="";1440<=e&&(e%=1440),e<0&&(e=1440+e);var s=Math.floor(e/60),e=(e%60<10?"0":"")+e%60;return t=t||w2utils.settings.timeFormat,i=-1!==t.indexOf("h24")?s+":"+e:(s<=12?s:s-12)+":"+e+" "+(12<=s?"pm":"am")}}let w2tooltip=new Tooltip,w2menu=new MenuTooltip,w2color=new ColorTooltip,w2date=new DateTooltip;class w2toolbar extends w2base{constructor(e){super(e.name),this.box=null,this.name=null,this.routeData={},this.items=[],this.right="",this.tooltip="top|left",this.onClick=null,this.onMouseDown=null,this.onMouseUp=null,this.onMouseEnter=null,this.onMouseLeave=null,this.onRender=null,this.onRefresh=null,this.onResize=null,this.onDestroy=null,this.item_template={id:null,type:"button",text:null,html:"",tooltip:null,count:null,hidden:!1,disabled:!1,checked:!1,icon:null,route:null,arrow:null,style:null,group:null,items:null,selected:null,color:null,overlay:{anchorClass:""},onClick:null,onRefresh:null},this.last={badge:{}};var t=e.items;delete e.items,Object.assign(this,e),Array.isArray(t)&&this.add(t,!0),e.items=t,"string"==typeof this.box&&(this.box=query(this.box).get(0)),this.box&&this.render(this.box)}add(e,t){this.insert(null,e,t)}insert(r,e,n){(e=Array.isArray(e)?e:[e]).forEach((e,t,i)=>{"string"==typeof e&&(e=i[t]={id:e,text:e});var l,s=["button","check","radio","drop","menu","menu-radio","menu-check","color","text-color","html","break","spacer","new-line"];if(s.includes(String(e.type)))if(null!=e.id||["break","spacer","new-line"].includes(e.type)){if(null==e.type)console.log('ERROR: The parameter "type" is required but not supplied.',e);else if(w2utils.checkUniqueId(e.id,this.items,"toolbar",this.name)){let s=w2utils.extend({},this.item_template,e);"menu-check"==s.type?(Array.isArray(s.selected)||(s.selected=[]),Array.isArray(s.items)&&s.items.forEach(e=>{(e="string"==typeof e?i[t]={id:e,text:e}:e).checked&&!s.selected.includes(e.id)&&s.selected.push(e.id),!e.checked&&s.selected.includes(e.id)&&(e.checked=!0),null==e.checked&&(e.checked=!1)})):"menu-radio"==s.type&&Array.isArray(s.items)&&s.items.forEach((e,t,i)=>{(e="string"==typeof 
e?i[t]={id:e,text:e}:e).checked&&null==s.selected?s.selected=e.id:e.checked=!1,e.checked||s.selected!=e.id||(e.checked=!0),null==e.checked&&(e.checked=!1)}),null==r?this.items.push(s):(l=this.get(r,!0),this.items=this.items.slice(0,l).concat([s],this.items.slice(l))),s.line=s.line??1,!0!==n&&this.refresh(s.id)}}else console.log('ERROR: The parameter "id" is required but not supplied.',e);else console.log('ERROR: The parameter "type" should be one of the following:',s,`, but ${e.type} is supplied.`,e)}),!0!==n&&this.resize()}remove(){let i=0;return Array.from(arguments).forEach(e=>{var t=this.get(e);t&&-1==String(e).indexOf(":")&&(i++,query(this.box).find("#tb_"+this.name+"_item_"+w2utils.escapeId(t.id)).remove(),null!=(e=this.get(t.id,!0))&&this.items.splice(e,1))}),this.resize(),i}set(e,t){var i=this.get(e);return null!=i&&(Object.assign(i,t),this.refresh(String(e).split(":")[0]),!0)}get(e,i){if(0===arguments.length){var t=[];for(let e=0;e span`);0{var t=this.get(e);t&&(t.hidden=!1,i.push(String(e).split(":")[0]))}),setTimeout(()=>{i.forEach(e=>{this.refresh(e),this.resize()})},15),i}hide(){let i=[];return Array.from(arguments).forEach(e=>{var t=this.get(e);t&&(t.hidden=!0,i.push(String(e).split(":")[0]))}),setTimeout(()=>{i.forEach(e=>{this.refresh(e),this.tooltipHide(e),this.resize()})},15),i}enable(){let i=[];return Array.from(arguments).forEach(e=>{var t=this.get(e);t&&(t.disabled=!1,i.push(String(e).split(":")[0]))}),setTimeout(()=>{i.forEach(e=>{this.refresh(e)})},15),i}disable(){let i=[];return Array.from(arguments).forEach(e=>{var t=this.get(e);t&&(t.disabled=!0,i.push(String(e).split(":")[0]))}),setTimeout(()=>{i.forEach(e=>{this.refresh(e),this.tooltipHide(e)})},15),i}check(){let i=[];return Array.from(arguments).forEach(e=>{var t=this.get(e);t&&-1==String(e).indexOf(":")&&(t.checked=!0,i.push(String(e).split(":")[0]))}),setTimeout(()=>{i.forEach(e=>{this.refresh(e)})},15),i}uncheck(){let i=[];return Array.from(arguments).forEach(e=>{var t=this.get(e);t&&-1==String(e).indexOf(":")&&(["menu","menu-radio","menu-check","drop","color","text-color"].includes(t.type)&&t.checked&&w2tooltip.hide(this.name+"-drop"),t.checked=!1,i.push(String(e).split(":")[0]))}),setTimeout(()=>{i.forEach(e=>{this.refresh(e)})},15),i}click(e,t){var i=String(e).split(":");let l=this.get(i[0]),r=l&&l.items?w2utils.normMenu.call(this,l.items,l):[];if(1{var t=(e,t)=>{let i=this;return function(){i.set(e,{checked:!1})}},i=query(this.box).find("#tb_"+this.name+"_item_"+w2utils.escapeId(l.id));if(w2utils.isPlainObject(l.overlay)||(l.overlay={}),"drop"==l.type&&w2tooltip.show(w2utils.extend({html:l.html,class:"w2ui-white",hideOn:["doc-click"]},l.overlay,{anchor:i[0],name:this.name+"-drop",data:{item:l,btn:s}})).hide(t(l.id,s)),["menu","menu-radio","menu-check"].includes(l.type)){let 
e="normal";"menu-radio"==l.type&&(e="radio",r.forEach(e=>{l.selected==e.id?e.checked=!0:e.checked=!1})),"menu-check"==l.type&&(e="check",r.forEach(e=>{Array.isArray(l.selected)&&l.selected.includes(e.id)?e.checked=!0:e.checked=!1})),w2menu.show(w2utils.extend({items:r},l.overlay,{type:e,name:this.name+"-drop",anchor:i[0],data:{item:l,btn:s}})).hide(t(l.id,s)).remove(e=>{this.menuClick({name:this.name,remove:!0,item:l,subItem:e.detail.item,originalEvent:e})}).select(e=>{this.menuClick({name:this.name,item:l,subItem:e.detail.item,originalEvent:e})})}["color","text-color"].includes(l.type)&&w2color.show(w2utils.extend({color:l.color},l.overlay,{anchor:i[0],name:this.name+"-drop",data:{item:l,btn:s}})).hide(t(l.id,s)).select(e=>{null!=e.detail.color&&this.colorClick({name:this.name,item:l,color:e.detail.color})})},0)}if(["check","menu","menu-radio","menu-check","drop","color","text-color"].includes(l.type)&&(l.checked=!l.checked,l.checked?query(this.box).find(s).addClass("checked"):query(this.box).find(s).removeClass("checked")),l.route){let t=String("/"+l.route).replace(/\/{2,}/g,"/");var a=w2utils.parseRoute(t);if(0{window.location.hash=t},1)}this.tooltipShow(e),i.finish()}}}scroll(a,o,h){return new Promise((e,t)=>{var i=query(this.box).find(`.w2ui-tb-line:nth-child(${o}) .w2ui-scroll-wrapper`),s=i.get(0).scrollLeft,l=i.find(".w2ui-tb-right").get(0),r=i.parent().get(0).getBoundingClientRect().width,n=s+parseInt(l.offsetLeft)+parseInt(l.clientWidth);switch(a){case"left":(scroll=s-r+50)<=0&&(scroll=0),i.get(0).scrollTo({top:0,left:scroll,behavior:h?"atuo":"smooth"});break;case"right":(scroll=s+r-50)>=n-r&&(scroll=n-r),i.get(0).scrollTo({top:0,left:scroll,behavior:h?"atuo":"smooth"})}setTimeout(()=>{this.resize(),e()},h?0:500)})}render(e){var s=Date.now(),l=("string"==typeof e&&(e=query(e).get(0)),this.trigger("render",{target:this.name,box:e??this.box}));if(!0!==l.isCancelled&&(null!=e&&(0 ",r),null!=r.hint&&console.log("NOTICE: toolbar item.hint property is deprecated, please use item.tooltip. Item -> ",r),0!==e&&"new-line"!=r.type||(i++,t+=` -
      -
      -
      ${this.right[i-1]??""}
      -
      -
      -
      -
      - `),r.line=i)}return query(this.box).attr("name",this.name).addClass("w2ui-reset w2ui-toolbar").html(t),0{this.resize()}),this.last.observeResize.observe(this.box),this.refresh(),this.resize(),l.finish(),Date.now()-s}}refresh(t){var i=Date.now(),l=this.trigger("refresh",{target:null!=t?t:this.name,item:this.get(t)});if(!0!==l.isCancelled){let e;if(null==t)for(let e=0;e{i[e].anchor==s.get(0)&&(i[e].anchor=t)})}if(["menu","menu-radio","menu-check"].includes(r.type)&&r.checked){let t=Array.isArray(r.selected)?r.selected:[r.selected];r.items.forEach(e=>{t.includes(e.id)?e.checked=!0:e.checked=!1}),w2menu.update(this.name+"-drop",r.items)}return"function"==typeof r.onRefresh&&e.finish(),l.finish(),Date.now()-i}}}}resize(){var e=Date.now(),t=this.trigger("resize",{target:this.name});if(!0!==t.isCancelled)return query(this.box).find(".w2ui-tb-line").each(e=>{var e=query(e),t=(e.find(".w2ui-scroll-left, .w2ui-scroll-right").hide(),e.find(".w2ui-scroll-wrapper").get(0)),i=e.find(".w2ui-tb-right"),s=e.get(0).getBoundingClientRect().width,i=0e.id==t)}),""),s="function"==typeof i.text?i.text.call(this,i):i.text;i.icon&&(t=i.icon,"function"==typeof i.icon&&(t=i.icon.call(this,i)),t=`
      ${t="<"!==String(t).slice(0,1)?``:t}
      `);var l=["w2ui-tb-button"];switch(i.checked&&l.push("checked"),i.disabled&&l.push("disabled"),i.hidden&&l.push("hidden"),t||l.push("no-icon"),i.type){case"color":case"text-color":"string"==typeof i.color&&("#"==i.color.slice(0,1)&&(i.color=i.color.slice(1)),[3,6,8].includes(i.color.length)&&(i.color="#"+i.color)),"color"==i.type&&(s=` - `+(i.text?`
      ${w2utils.lang(i.text)}
      `:"")),"text-color"==i.type&&(s=''+(i.text?w2utils.lang(i.text):"Aa")+"");case"menu":case"menu-check":case"menu-radio":case"button":case"check":case"radio":case"drop":var r=!0===i.arrow||!1!==i.arrow&&["menu","menu-radio","menu-check","drop","color","text-color"].includes(i.type);e=` -
      - ${t} - ${""!=s?`
      - ${w2utils.lang(s)} - ${null!=i.count?w2utils.stripSpaces(` - ${i.count} - `):""} - ${r?'':""} -
      `:""} -
      - `;break;case"break":e=`
      -   -
      `;break;case"spacer":e=`
      -
      `;break;case"html":e=`
      - ${"function"==typeof i.html?i.html.call(this,i):i.html} -
      `}return e}tooltipShow(t){if(null!=this.tooltip){var i=query(this.box).find("#tb_"+this.name+"_item_"+w2utils.escapeId(t)).get(0),t=this.get(t),s=this.tooltip;let e=t.tooltip;"function"==typeof e&&(e=e.call(this,t)),["menu","menu-radio","menu-check","drop","color","text-color"].includes(t.type)&&1==t.checked||w2tooltip.show({anchor:i,name:this.name+"-tooltip",html:e,position:s})}}tooltipHide(e){null!=this.tooltip&&w2tooltip.hide(this.name+"-tooltip")}menuClick(t){if(t.item&&!t.item.disabled){var i=this.trigger(!0!==t.remove?"click":"remove",{target:t.item.id+":"+t.subItem.id,item:t.item,subItem:t.subItem,originalEvent:t.originalEvent});if(!0!==i.isCancelled){let l=t.subItem,r=this.get(t.item.id),e=r.items;if("function"==typeof e&&(e=r.items()),"menu"==r.type&&(r.selected=l.id),"menu-radio"==r.type&&(r.selected=l.id,Array.isArray(e)&&e.forEach(e=>{!0===e.checked&&delete e.checked,Array.isArray(e.items)&&e.items.forEach(e=>{!0===e.checked&&delete e.checked})}),l.checked=!0),"menu-check"==r.type)if(Array.isArray(r.selected)||(r.selected=[]),null==l.group){var n=r.selected.indexOf(l.id);-1==n?(r.selected.push(l.id),l.checked=!0):(r.selected.splice(n,1),l.checked=!1)}else if(!1!==l.group){let i=[];n=r.selected.indexOf(l.id);let s=e=>{e.forEach(e=>{var t;e.group===l.group&&-1!=(t=r.selected.indexOf(e.id))&&(e.id!=l.id&&i.push(e.id),r.selected.splice(t,1)),Array.isArray(e.items)&&s(e.items)})};s(e),-1==n&&(r.selected.push(l.id),l.checked=!0)}if("string"==typeof l.route){let t=""!==l.route?String("/"+l.route).replace(/\/{2,}/g,"/"):"";var s=w2utils.parseRoute(t);if(0{window.location.hash=t},1)}this.refresh(t.item.id),i.finish()}}}colorClick(e){var t;e.item&&!e.item.disabled&&!0!==(t=this.trigger("click",{target:e.item.id,item:e.item,color:e.color,final:e.final,originalEvent:e.originalEvent})).isCancelled&&(e.item.color=e.color,this.refresh(e.item.id),t.finish())}mouseAction(e,t,i,s){var l=this.get(s),e=this.trigger("mouse"+i,{target:s,item:l,object:l,originalEvent:e});if(!0!==e.isCancelled&&!l.disabled&&!l.hidden){switch(i){case"Enter":query(t).addClass("over"),this.tooltipShow(s);break;case"Leave":query(t).removeClass("over down"),this.tooltipHide(s);break;case"Down":query(t).addClass("down");break;case"Up":query(t).removeClass("down")}e.finish()}}}class w2sidebar extends w2base{constructor(e){super(e.name),this.name=null,this.box=null,this.sidebar=null,this.parent=null,this.nodes=[],this.menu=[],this.routeData={},this.selected=null,this.icon=null,this.style="",this.topHTML="",this.bottomHTML="",this.flatButton=!1,this.keyboard=!0,this.flat=!1,this.hasFocus=!1,this.levelPadding=12,this.skipRefresh=!1,this.tabIndex=null,this.handle={size:0,style:"",html:"",tooltip:""},this.onClick=null,this.onDblClick=null,this.onMouseEnter=null,this.onMouseLeave=null,this.onContextMenu=null,this.onMenuClick=null,this.onExpand=null,this.onCollapse=null,this.onKeydown=null,this.onRender=null,this.onRefresh=null,this.onResize=null,this.onDestroy=null,this.onFocus=null,this.onBlur=null,this.onFlat=null,this.node_template={id:null,text:"",order:null,count:null,icon:null,nodes:[],style:"",route:null,selected:!1,expanded:!1,hidden:!1,disabled:!1,group:!1,groupShowHide:!0,collapsible:!1,plus:!1,onClick:null,onDblClick:null,onContextMenu:null,onExpand:null,onCollapse:null,parent:null,sidebar:null},this.last={badge:{}};var t=e.nodes;delete e.nodes,Object.assign(this,e),Array.isArray(t)&&this.add(t),e.nodes=t,"string"==typeof this.box&&(this.box=query(this.box).get(0)),this.box&&this.render(this.box)}add(e,t){return 
1==arguments.length&&(t=arguments[0],e=this),"string"==typeof e&&(e=this.get(e)),this.insert(e=null!=e&&""!=e?e:this,null,t)}insert(t,i,s){let l,r,n,a,o;if(2==arguments.length&&"string"==typeof t)if(s=arguments[1],null!=(i=arguments[0])){if(null==(r=this.get(i)))return null!=(s=Array.isArray(s)?s:[s])[0].caption&&null==s[0].text&&(console.log("NOTICE: sidebar node.caption property is deprecated, please use node.text. Node -> ",s[0]),s[0].text=s[0].caption),l=s[0].text,console.log('ERROR: Cannot insert node "'+l+'" because cannot find node "'+i+'" to insert before.'),null;t=this.get(i).parent}else t=this;null!=(t="string"==typeof t?this.get(t):t)&&""!=t||(t=this),Array.isArray(s)||(s=[s]);for(let e=0;e{null!=(i=this.get(e))&&(null!=this.selected&&this.selected===i.id&&(this.selected=null),null!=(e=this.get(i.parent,e,!0))&&(i.parent.nodes[e].selected&&i.sidebar.unselect(i.id),i.parent.nodes.splice(e,1),t++))}),this.skipRefresh||(0{var e=i.nodes&&0{e.nodes&&0{t.call(this,e),e.nodes&&0{-1===e.text.toLowerCase().indexOf(i)?e.hidden=!0:(t++,function e(t){t.parent&&(t.parent.hidden=!1,e(t.parent))}(e),e.hidden=!1)}),this.refresh(),t}show(){let t=[];return Array.from(arguments).forEach(e=>{e=this.get(e);null!=e&&!1!==e.hidden&&(e.hidden=!1,t.push(e.id))}),0{e=this.get(e);null!=e&&!0!==e.hidden&&(e.hidden=!0,t.push(e.id))}),0{e=this.get(e);null!=e&&!1!==e.disabled&&(e.disabled=!1,t.push(e.id))}),0{e=this.get(e);null!=e&&!0!==e.disabled&&(e.disabled=!0,e.selected&&this.unselect(e.id),t.push(e.id))}),0{t.refresh(e)},0),!0):void 0)}expand(e){var t=this.get(e),i=this.trigger("expand",{target:e,object:t});if(!0!==i.isCancelled)return query(this.box).find("#node_"+w2utils.escapeId(e)+"_sub").show(),query(this.box).find("#node_"+w2utils.escapeId(e)+" .w2ui-collapsed").removeClass("w2ui-collapsed").addClass("w2ui-expanded"),t.expanded=!0,i.finish(),this.refresh(e),!0}collapseAll(t){if(null==(t="string"==typeof(t=null==t?this:t)?this.get(t):t).nodes)return!1;for(let e=0;e{var t=query(e).attr("id").replace("node_",""),t=n.get(t);null!=t&&(t.selected=!1),query(e).removeClass("w2ui-selected").find(".w2ui-icon").removeClass("w2ui-icon-selected")});let t=query(n.box).find("#node_"+w2utils.escapeId(l)),s=query(n.box).find("#node_"+w2utils.escapeId(n.selected));t.addClass("w2ui-selected").find(".w2ui-icon").addClass("w2ui-icon-selected"),setTimeout(()=>{var e=n.trigger("click",{target:l,originalEvent:r,node:a,object:a});if(!0===e.isCancelled)t.removeClass("w2ui-selected").find(".w2ui-icon").removeClass("w2ui-icon-selected"),s.addClass("w2ui-selected").find(".w2ui-icon").addClass("w2ui-icon-selected");else{if(null!=s&&(s.selected=!1),n.get(l).selected=!0,n.selected=l,"string"==typeof a.route){let t=""!==a.route?String("/"+a.route).replace(/\/{2,}/g,"/"):"";var i=w2utils.parseRoute(t);if(0{window.location.hash=t},1)}e.finish()}},1)}}focus(e){let t=this;e=this.trigger("focus",{target:this.name,originalEvent:e});if(!0===e.isCancelled)return!1;this.hasFocus=!0,query(this.box).find(".w2ui-sidebar-body").addClass("w2ui-focus"),setTimeout(()=>{var e=query(t.box).find("#sidebar_"+t.name+"_focus").get(0);document.activeElement!=e&&e.focus()},10),e.finish()}blur(e){e=this.trigger("blur",{target:this.name,originalEvent:e});if(!0===e.isCancelled)return!1;this.hasFocus=!1,query(this.box).find(".w2ui-sidebar-body").removeClass("w2ui-focus"),e.finish()}keydown(e){let n=this,t=n.get(n.selected);var i;function s(e,t){null==e||e.hidden||e.disabled||e.group||(n.click(e.id,t),n.inView(e.id)||n.scrollIntoView(e.id))}function 
l(e,t){for(e=t(e);null!=e&&(e.hidden||e.disabled)&&!e.group;)e=t(e);return e}function r(e){if(null==e)return null;var t=e.parent,e=n.get(e.id,!0);let i=0t.clientHeight+t.scrollTop))}scrollIntoView(i,s){return new Promise((e,t)=>{null==i&&(i=this.selected),null!=this.get(i)&&(query(this.box).find("#node_"+w2utils.escapeId(i)).get(0).scrollIntoView({block:"center",inline:"center",behavior:s?"atuo":"smooth"}),setTimeout(()=>{this.resize(),e()},s?0:500))})}dblClick(e,t){var i=this.get(e),t=this.trigger("dblClick",{target:e,originalEvent:t,object:i});!0!==t.isCancelled&&(this.toggle(e),t.finish())}contextMenu(t,i){var e=this.get(t),s=(t!=this.selected&&this.click(t),this.trigger("contextMenu",{target:t,originalEvent:i,object:e,allowOnDisabled:!1}));!0===s.isCancelled||e.disabled&&!s.allowOnDisabled||(0{this.menuClick(t,parseInt(e.detail.index),i)}),i.preventDefault&&i.preventDefault(),s.finish())}menuClick(e,t,i){e=this.trigger("menuClick",{target:e,originalEvent:i,menuIndex:t,menuItem:this.menu[t]});!0!==e.isCancelled&&e.finish()}goFlat(){var e=this.trigger("flat",{goFlat:!this.flat});!0!==e.isCancelled&&(this.flat=!this.flat,this.refresh(),e.finish())}render(e){var i=Date.now();let s=this;"string"==typeof e&&(e=query(e).get(0));var l=this.trigger("render",{target:this.name,box:e??this.box});if(!0!==l.isCancelled&&(null!=e&&(0 -
      - -
      -
      - `);e=query(this.box).get(0).getBoundingClientRect();query(this.box).find(":scope > div").css({width:e.width+"px",height:e.height+"px"}),query(this.box).get(0).style.cssText+=this.style;let t;return query(this.box).find("#sidebar_"+this.name+"_focus").on("focus",function(e){clearTimeout(t),s.hasFocus||s.focus(e)}).on("blur",function(e){t=setTimeout(()=>{s.hasFocus&&s.blur(e)},100)}).on("keydown",function(e){9!=e.keyCode&&w2ui[s.name].keydown.call(w2ui[s.name],e)}),query(this.box).off("mousedown").on("mousedown",function(t){setTimeout(()=>{var e;-1==["INPUT","TEXTAREA","SELECT"].indexOf(t.target.tagName.toUpperCase())&&(e=query(s.box).find("#sidebar_"+s.name+"_focus"),document.activeElement!=e.get(0)&&e.get(0).focus())},1)}),this.last.observeResize=new ResizeObserver(()=>{this.resize()}),this.last.observeResize.observe(this.box),l.finish(),this.refresh(),Date.now()-i}}update(e,t){var i,s,e=this.get(e);let l;return e&&(i=query(this.box).find("#node_"+w2utils.escapeId(e.id)),e.group?(t.text&&(e.text=t.text,i.find(".w2ui-group-text").replace("function"==typeof e.text?e.text.call(this,e):''+e.text+""),delete t.text),t.class&&(e.class=t.class,l=i.data("level"),i.get(0).className="w2ui-node-group w2ui-level-"+l+(e.class?" "+e.class:""),delete t.class),t.style&&(e.style=t.style,i.get(0).nextElementSibling.style=e.style+";"+(!e.hidden&&e.expanded?"":"display: none;"),delete t.style)):(t.icon&&0<(s=i.find(".w2ui-node-image > span")).length&&(e.icon=t.icon,s[0].className="function"==typeof e.icon?e.icon.call(this,e):e.icon,delete t.icon),t.count&&(e.count=t.count,i.find(".w2ui-node-count").html(e.count),0`),null!=l||""===this.topHTML&&""===e||(query(this.box).find(".w2ui-sidebar-top").html(this.topHTML+e),query(this.box).find(".w2ui-sidebar-body").css("top",query(this.box).find(".w2ui-sidebar-top").get(0)?.clientHeight+"px"),query(this.box).find(".w2ui-flat").off("clcik").on("click",e=>{this.goFlat()})),null!=l&&""!==this.bottomHTML&&(query(this.box).find(".w2ui-sidebar-bottom").html(this.bottomHTML),query(this.box).find(".w2ui-sidebar-body").css("bottom",query(this.box).find(".w2ui-sidebar-bottom").get(0)?.clientHeight+"px")),query(this.box).find(":scope > div").removeClass("w2ui-sidebar-flat").addClass(this.flat?"w2ui-sidebar-flat":"").css({width:query(this.box).get(0)?.clientWidth+"px",height:query(this.box).get(0)?.clientHeight+"px"}),0'),query(this.box).find(o).remove(),query(this.box).find(i).remove(),query(this.box).find("#sidebar_"+this.name+"_tmp").before(s),query(this.box).find("#sidebar_"+this.name+"_tmp").remove());var l=query(this.box).find(":scope > div").get(0),d={top:l?.scrollTop,left:l?.scrollLeft};query(this.box).find(i).html("");for(let e=0;e ",t),t.text=t.caption),Array.isArray(t.nodes)&&0${e}`),i=` -
      - ${t.groupShowHide&&t.collapsible?`${!t.hidden&&t.expanded?w2utils.lang("Hide"):w2utils.lang("Show")}`:""} ${e} -
      -
      -
      `,h.flat&&(i=` -
       
      -
      `)}else{t.selected&&!t.disabled&&(h.selected=t.id),l="",s&&(l=` -
      - -
      `);let e="";var n=null!=t.count?`
      - ${t.count} -
      `:"",a=(!0===t.collapsible&&(e=`
      `),w2utils.lang("function"==typeof t.text?t.text.call(h,t):t.text)),o=["w2ui-node","w2ui-level-"+r,"w2ui-eaction"];t.selected&&o.push("w2ui-selected"),t.disabled&&o.push("w2ui-disabled"),t.class&&o.push(t.class),i=` -
      - ${h.handle.html?`
      - ${"function"==typeof h.handle.html?h.handle.html.call(h,t):h.handle.html} -
      `:""} -
      - ${e} ${l} ${n} -
      ${a}
      -
      -
      -
      `,h.flat&&(i=` -
      -
      ${l}
      -
      -
      `)}return i}}}}mouseAction(e,t,i,s,l){var r=this.get(i),n=w2utils.lang("function"==typeof r.text?r.text.call(this,r):r.text)+(r.count||0===r.count?' - '+r.count+"":""),e=this.trigger("mouse"+e,{target:i,node:r,tooltip:n,originalEvent:s});"tooltip"==l&&this.tooltip(t,n,i),"handle"==l&&this.handleTooltip(t,i),e.finish()}tooltip(e,t,i){e=query(e).find(".w2ui-node-data");""!==t?w2tooltip.show({anchor:e.get(0),name:this.name+"_tooltip",html:t,position:"right|left"}):w2tooltip.hide(this.name+"_tooltip")}handleTooltip(e,t){let i=this.handle.tooltip;""!==(i="function"==typeof i?i(t):i)&&null!=t?w2tooltip.show({anchor:e,name:this.name+"_tooltip",html:i,position:"top|bottom"}):w2tooltip.hide(this.name+"_tooltip")}showPlus(e,t){query(e).find("span:nth-child(1)").css("color",t)}resize(){var e,t=Date.now(),i=this.trigger("resize",{target:this.name});if(!0!==i.isCancelled)return e=query(this.box).get(0).getBoundingClientRect(),query(this.box).css("overflow","hidden"),query(this.box).find(":scope > div").css({width:e.width+"px",height:e.height+"px"}),i.finish(),Date.now()-t}destroy(){var e=this.trigger("destroy",{target:this.name});!0!==e.isCancelled&&(0{var t,i;null==e.id?console.log(`ERROR: The parameter "id" is required but not supplied. (obj: ${this.name})`):w2utils.checkUniqueId(e.id,this.tabs,"tabs",this.name)&&(e=Object.assign({},this.tab_template,e),null==s?(this.tabs.push(e),l.push(this.animateInsert(null,e))):(t=this.get(s,!0),i=this.tabs[t].id,this.tabs.splice(t,0,e),l.push(this.animateInsert(i,e))))}),Promise.all(l)}remove(){let t=0;return Array.from(arguments).forEach(e=>{e=this.get(e);e&&(t++,this.tabs.splice(this.get(e.id,!0),1),query(this.box).find(`#tabs_${this.name}_tab_`+w2utils.escapeId(e.id)).remove())}),this.resize(),t}select(e){return this.active!=e&&null!=this.get(e)&&(this.active=e,this.refresh(),!0)}set(e,t){var i=this.get(e,!0);return null!=i&&(w2utils.extend(this.tabs[i],t),this.refresh(e),!0)}get(t,i){if(0===arguments.length){var s=[];for(let e=0;e{e=this.get(e);e&&!1!==e.hidden&&(e.hidden=!1,t.push(e.id))}),setTimeout(()=>{t.forEach(e=>{this.refresh(e),this.resize()})},15),t}hide(){let t=[];return Array.from(arguments).forEach(e=>{e=this.get(e);e&&!0!==e.hidden&&(e.hidden=!0,t.push(e.id))}),setTimeout(()=>{t.forEach(e=>{this.refresh(e),this.resize()})},15),t}enable(){let t=[];return Array.from(arguments).forEach(e=>{e=this.get(e);e&&!1!==e.disabled&&(e.disabled=!1,t.push(e.id))}),setTimeout(()=>{t.forEach(e=>{this.refresh(e)})},15),t}disable(){let t=[];return Array.from(arguments).forEach(e=>{e=this.get(e);e&&!0!==e.disabled&&(e.disabled=!0,t.push(e.id))}),setTimeout(()=>{t.forEach(e=>{this.refresh(e)})},15),t}dragMove(i){if(this.last.reordering){let s=this;var l=this.last.moving,r=this.tabs[l.index],n=h(l.index,1),a=h(l.index,-1),r=query(this.box).find("#tabs_"+this.name+"_tab_"+w2utils.escapeId(r.id));if(0t)return n=this.tabs.indexOf(n),this.tabs.splice(l.index,0,this.tabs.splice(n,1)[0]),l.$tab.before(o.get(0)),l.$tab.css("opacity",0),void Object.assign(this.last.moving,{index:n,divX:-e,x:i.pageX+e,left:l.left+l.divX+e})}if(l.divX<0&&a){o=query(this.box).find("#tabs_"+this.name+"_tab_"+w2utils.escapeId(a.id));let e=parseInt(r.get(0).clientWidth),t=parseInt(o.get(0).clientWidth);e=et&&(n=this.tabs.indexOf(a),this.tabs.splice(l.index,0,this.tabs.splice(n,1)[0]),o.before(l.$tab),l.$tab.css("opacity",0),Object.assign(l,{index:n,divX:e,x:i.pageX-e,left:l.left+l.divX-e}))}function h(e,t){e+=t;let i=s.tabs[e];return i=i&&i.hidden?h(e,t):i}}}mouseAction(e,t,i){var 
s=this.get(t),l=this.trigger("mouse"+e,{target:t,tab:s,object:s,originalEvent:i});if(!0!==l.isCancelled&&!s.disabled&&!s.hidden){switch(e){case"Enter":this.tooltipShow(t);break;case"Leave":this.tooltipHide(t);break;case"Down":this.initReorder(t,i)}l.finish()}}tooltipShow(t){var i=this.get(t),t=query(this.box).find("#tabs_"+this.name+"_tab_"+w2utils.escapeId(t)).get(0);if(null!=this.tooltip&&!i.disabled&&!this.last.reordering){var s=this.tooltip;let e=i.tooltip;"function"==typeof e&&(e=e.call(this,i)),w2tooltip.show({anchor:t,name:this.name+"_tooltip",html:e,position:s})}}tooltipHide(e){null!=this.tooltip&&w2tooltip.hide(this.name+"_tooltip")}getTabHTML(e){e=this.get(e,!0),e=this.tabs[e];if(null==e)return!1;null==e.text&&null!=e.caption&&(e.text=e.caption),null==e.tooltip&&null!=e.hint&&(e.tooltip=e.hint),null!=e.caption&&console.log("NOTICE: tabs tab.caption property is deprecated, please use tab.text. Tab -> ",e),null!=e.hint&&console.log("NOTICE: tabs tab.hint property is deprecated, please use tab.tooltip. Tab -> ",e);let t=e.text,i=(null==(t="function"==typeof t?t.call(this,e):t)&&(t=""),""),s="";return e.hidden&&(s+="display: none;"),e.disabled&&(s+="opacity: 0.2;"),e.closable&&!e.disabled&&(i=`
      -
      `),` -
      - ${w2utils.lang(t)+i} -
      `}refresh(e){var t=Date.now(),i=("up"==this.flow?query(this.box).addClass("w2ui-tabs-up"):query(this.box).removeClass("w2ui-tabs-up"),this.trigger("refresh",{target:null!=e?e:this.name,object:this.get(e)}));if(!0!==i.isCancelled){if(null==e)for(let e=0;e -
      -
      ${this.right}
      -
      -
      -
      `,query(this.box).attr("name",this.name).addClass("w2ui-reset w2ui-tabs").html(e),0{this.resize()}),this.last.observeResize.observe(this.box),i.finish(),this.refresh(),this.resize(),Date.now()-t)}initReorder(e,n){if(this.reorder){let t=this,i=query(this.box).find("#tabs_"+this.name+"_tab_"+w2utils.escapeId(e)),s=this.get(e,!0),l=query(i.get(0).cloneNode(!0)),r;l.attr("id","#tabs_"+this.name+"_tab_ghost"),this.last.moving={index:s,indexFrom:s,$tab:i,$ghost:l,divX:0,left:i.get(0).getBoundingClientRect().left,parentX:query(this.box).get(0).getBoundingClientRect().left,x:n.pageX,opacity:i.css("opacity")},query(document).off(".w2uiTabReorder").on("mousemove.w2uiTabReorder",function(e){if(!t.last.reordering){if(!0===(r=t.trigger("reorder",{target:t.tabs[s].id,indexFrom:s,tab:t.tabs[s]})).isCancelled)return;w2tooltip.hide(this.name+"_tooltip"),t.last.reordering=!0,l.addClass("moving"),l.css({"pointer-events":"none",position:"absolute",left:i.get(0).getBoundingClientRect().left}),i.css("opacity",0),query(t.box).find(".w2ui-scroll-wrapper").append(l.get(0)),query(t.box).find(".w2ui-tab-close").hide()}t.last.moving.divX=e.pageX-t.last.moving.x,l.css("left",t.last.moving.left-t.last.moving.parentX+t.last.moving.divX+"px"),t.dragMove(e)}).on("mouseup.w2uiTabReorder",function(){query(document).off(".w2uiTabReorder"),l.css({transition:"0.1s",left:t.last.moving.$tab.get(0).getBoundingClientRect().left-t.last.moving.parentX}),query(t.box).find(".w2ui-tab-close").show(),setTimeout(()=>{l.remove(),i.css({opacity:t.last.moving.opacity}),t.last.reordering&&r.finish({indexTo:t.last.moving.index}),t.last.reordering=!1},100)})}}scroll(a,o){return new Promise((e,t)=>{var i=query(this.box).find(".w2ui-scroll-wrapper"),s=i.get(0).scrollLeft,l=i.find(".w2ui-tabs-right").get(0),r=i.parent().get(0).getBoundingClientRect().width,n=s+parseInt(l.offsetLeft)+parseInt(l.clientWidth);switch(a){case"left":{let e=s-r+50;e<=0&&(e=0),i.get(0).scrollTo({top:0,left:e,behavior:o?"atuo":"smooth"});break}case"right":{let e=s+r-50;e>=n-r&&(e=n-r),i.get(0).scrollTo({top:0,left:e,behavior:o?"atuo":"smooth"});break}}setTimeout(()=>{this.resize(),e()},o?0:350)})}scrollIntoView(i,s){return new Promise((e,t)=>{null==i&&(i=this.active),null!=this.get(i)&&(query(this.box).find("#tabs_"+this.name+"_tab_"+w2utils.escapeId(i)).get(0).scrollIntoView({block:"start",inline:"center",behavior:s?"atuo":"smooth"}),setTimeout(()=>{this.resize(),e()},s?0:500))})}resize(){var e=Date.now();if(null!=this.box){var t,i,s,l,r=this.trigger("resize",{target:this.name});if(!0!==r.isCancelled)return(t=query(this.box)).find(".w2ui-scroll-left, .w2ui-scroll-right").hide(),i=t.find(".w2ui-scroll-wrapper").get(0),l=t.find(".w2ui-tabs-right"),(s=t.get(0).getBoundingClientRect().width)<(l=0{window.location.hash=t},1)}e.finish()}}clickClose(e,t){var i=this.get(e);if(null==i||i.disabled)return!1;let s=this.trigger("close",{target:e,object:i,tab:i,originalEvent:t});!0!==s.isCancelled&&(this.animateClose(e).then(()=>{this.remove(e),s.finish(),this.refresh()}),t&&t.stopPropagation())}animateClose(r){return new Promise((e,t)=>{var i=query(this.box).find("#tabs_"+this.name+"_tab_"+w2utils.escapeId(r)),s=parseInt(i.get(0).clientWidth||0);let l=i.replace(`
      `);setTimeout(()=>{l.css({width:"0px"})},1),setTimeout(()=>{l.remove(),this.resize(),e()},500)})}animateInsert(t,r){return new Promise((i,e)=>{let s=query(this.box).find("#tabs_"+this.name+"_tab_"+w2utils.escapeId(t)),l=query.html(this.getTabHTML(r.id));if(0==s.length)(s=query(this.box).find("#tabs_tabs_right")).before(l),this.resize();else{l.css({opacity:0}),query(this.box).find("#tabs_tabs_right").before(l.get(0));let e=query(this.box).find("#"+l.attr("id")).get(0).clientWidth??0,t=query.html('
      ');s.before(t),l.hide(),t.before(l[0]),setTimeout(()=>{t.css({width:e+"px"})},1),setTimeout(()=>{t.remove(),l.css({opacity:1}).show(),this.refresh(r.id),this.resize(),i()},500)}})}}let w2panels=["top","left","main","preview","right","bottom"];class w2layout extends w2base{constructor(e){super(e.name),this.box=null,this.name=null,this.panels=[],this.last={},this.padding=1,this.resizer=4,this.style="",this.onShow=null,this.onHide=null,this.onResizing=null,this.onResizerClick=null,this.onRender=null,this.onRefresh=null,this.onChange=null,this.onResize=null,this.onDestroy=null,this.panel_template={type:null,title:"",size:100,minSize:20,maxSize:!1,hidden:!1,resizable:!1,overflow:"auto",style:"",html:"",tabs:null,toolbar:null,width:null,height:null,show:{toolbar:!1,tabs:!1},removed:null,onRefresh:null,onShow:null,onHide:null},Object.assign(this,e),Array.isArray(this.panels)||(this.panels=[]),this.panels.forEach((e,t)=>{var i,s,l;this.panels[t]=w2utils.extend({},this.panel_template,e),(w2utils.isPlainObject(e.tabs)||Array.isArray(e.tabs))&&function(e,t,i){var s=e.get(t);null!=s&&null==i&&(i=s.tabs);if(null==s||null==i)return;Array.isArray(i)&&(i={tabs:i});var l=e.name+"_"+t+"_tabs";w2ui[l]&&w2ui[l].destroy();s.tabs=new w2tabs(w2utils.extend({},i,{owner:e,name:e.name+"_"+t+"_tabs"})),s.show.tabs=!0}(this,e.type),(w2utils.isPlainObject(e.toolbar)||Array.isArray(e.toolbar))&&(t=this,e=e.type,i=void 0,null!=(s=t.get(e))&&null==i&&(i=s.toolbar),null!=s&&null!=i&&(Array.isArray(i)&&(i={items:i}),l=t.name+"_"+e+"_toolbar",w2ui[l]&&w2ui[l].destroy(),s.toolbar=new w2toolbar(w2utils.extend({},i,{owner:t,name:t.name+"_"+e+"_toolbar"})),s.show.toolbar=!0))}),w2panels.forEach(e=>{null==this.get(e)&&this.panels.push(w2utils.extend({},this.panel_template,{type:e,hidden:"main"!==e,size:50}))}),"string"==typeof this.box&&(this.box=query(this.box).get(0)),this.box&&this.render(this.box)}html(l,r,n){let a=this.get(l);var e={panel:l,html:a.html,error:!1,cancelled:!1,removed(e){"function"==typeof e&&(a.removed=e)}};if("function"==typeof a.removed&&(a.removed({panel:l,html:a.html,html_new:r,transition:n||"none"}),a.removed=null),"css"==l)query(this.box).find("#layout_"+this.name+"_panel_css").html(""),e.status=!0;else if(null==a)console.log("ERROR: incorrect panel name. Panel name can be main, left, right, top, bottom, preview or css"),e.error=!0;else if(null!=r){var t=this.trigger("change",{target:l,panel:a,html_new:r,transition:n});if(!0===t.isCancelled)e.cancelled=!0;else{let i="#layout_"+this.name+"_panel_"+a.type;var o=query(this.box).find(i+"> .w2ui-panel-content");let s=0;if(0 .w2ui-panel-content"),t=(e.after('
      '),query(this.box).find(i+"> .w2ui-panel-content.new-panel"));e.css("top",s),t.css("top",s),"object"==typeof r?(r.box=t[0],r.render()):t.hide().html(r),w2utils.transition(e[0],t[0],n,()=>{e.remove(),t.removeClass("new-panel"),t.css("overflow",a.overflow),query(query(this.box).find(i+"> .w2ui-panel-content").get(1)).remove(),query(this.box).removeClass("animating"),this.refresh(l)})}else this.refresh(l);t.finish()}}return e}message(e,t){var i=this.get(e);let s=query(this.box).find("#layout_"+this.name+"_panel_"+i.type),l=s.css("overflow");s.css("overflow","hidden");i=w2utils.message({owner:this,box:s.get(0),after:".w2ui-panel-title",param:e},t);return i&&i.self.on("close:after",()=>{s.css("overflow",l)}),i}confirm(e,t){var i=this.get(e);let s=query(this.box).find("#layout_"+this.name+"_panel_"+i.type),l=s.css("overflow");s.css("overflow","hidden");i=w2utils.confirm({owner:this,box:s.get(0),after:".w2ui-panel-title",param:e},t);return i&&i.self.on("close:after",()=>{s.css("overflow",l)}),i}load(i,s,l){return new Promise((t,e)=>{"css"!=i&&null==this.get(i)||null==s?e():fetch(s).then(e=>e.text()).then(e=>{this.resize(),t(this.html(i,e,l))})})}sizeTo(e,t,i){return null!=this.get(e)&&(query(this.box).find(":scope > div > .w2ui-panel").css("transition",!0!==i?".2s":"0s"),setTimeout(()=>{this.set(e,{size:t})},1),setTimeout(()=>{query(this.box).find(":scope > div > .w2ui-panel").css("transition","0s"),this.resize()},300),!0)}show(e,t){let i=this.trigger("show",{target:e,thisect:this.get(e),immediate:t});var s;if(!0!==i.isCancelled)return null!=(s=this.get(e))&&(!(s.hidden=!1)===t?(query(this.box).find("#layout_"+this.name+"_panel_"+e).css({opacity:"1"}),i.finish(),this.resize()):(query(this.box).addClass("animating"),query(this.box).find("#layout_"+this.name+"_panel_"+e).css({opacity:"0"}),query(this.box).find(":scope > div > .w2ui-panel").css("transition",".2s"),setTimeout(()=>{this.resize()},1),setTimeout(()=>{query(this.box).find("#layout_"+this.name+"_panel_"+e).css({opacity:"1"})},250),setTimeout(()=>{query(this.box).find(":scope > div > .w2ui-panel").css("transition","0s"),query(this.box).removeClass("animating"),i.finish(),this.resize()},300)),!0)}hide(e,t){let i=this.trigger("hide",{target:e,object:this.get(e),immediate:t});var s;if(!0!==i.isCancelled)return null!=(s=this.get(e))&&((s.hidden=!0)===t?(query(this.box).find("#layout_"+this.name+"_panel_"+e).css({opacity:"0"}),i.finish(),this.resize()):(query(this.box).addClass("animating"),query(this.box).find(":scope > div > .w2ui-panel").css("transition",".2s"),query(this.box).find("#layout_"+this.name+"_panel_"+e).css({opacity:"0"}),setTimeout(()=>{this.resize()},1),setTimeout(()=>{query(this.box).find(":scope > div > .w2ui-panel").css("transition","0s"),query(this.box).removeClass("animating"),i.finish(),this.resize()},300)),!0)}toggle(e,t){var i=this.get(e);return null!=i&&(i.hidden?this.show(e,t):this.hide(e,t))}set(e,t){var i=this.get(e,!0);return null!=i&&(w2utils.extend(this.panels[i],t),null==t.html&&null==t.resizable||this.refresh(e),this.resize(),!0)}get(t,i){for(let e=0;e .w2ui-panel-content");return 1!=e.length?null:e[0]}hideToolbar(e){var t=this.get(e);t&&(t.show.toolbar=!1,query(this.box).find("#layout_"+this.name+"_panel_"+e+"> .w2ui-panel-toolbar").hide(),this.resize())}showToolbar(e){var t=this.get(e);t&&(t.show.toolbar=!0,query(this.box).find("#layout_"+this.name+"_panel_"+e+"> .w2ui-panel-toolbar").show(),this.resize())}toggleToolbar(e){var 
t=this.get(e);t&&(t.show.toolbar?this.hideToolbar(e):this.showToolbar(e))}assignToolbar(e,t){"string"==typeof t&&null!=w2ui[t]&&(t=w2ui[t]);var i=this.get(e),s=(i.toolbar=t,query(this.box).find(e+"> .w2ui-panel-toolbar"));null!=i.toolbar?(0===s.find("[name="+i.toolbar.name+"]").length?i.toolbar.render(s.get(0)):null!=i.toolbar&&i.toolbar.refresh(),(t.owner=this).showToolbar(e),this.refresh(e)):(s.html(""),this.hideToolbar(e))}hideTabs(e){var t=this.get(e);t&&(t.show.tabs=!1,query(this.box).find("#layout_"+this.name+"_panel_"+e+"> .w2ui-panel-tabs").hide(),this.resize())}showTabs(e){var t=this.get(e);t&&(t.show.tabs=!0,query(this.box).find("#layout_"+this.name+"_panel_"+e+"> .w2ui-panel-tabs").show(),this.resize())}toggleTabs(e){var t=this.get(e);t&&(t.show.tabs?this.hideTabs(e):this.showTabs(e))}render(e){var t=Date.now();let o=this;"string"==typeof e&&(e=query(e).get(0));var i=this.trigger("render",{target:this.name,box:e??this.box});if(!0!==i.isCancelled){if(null!=e&&(0"),0
      ';query(this.box).find(":scope > div").append(s)}return query(this.box).find(":scope > div").append('
      '),this.refresh(),this.last.observeResize=new ResizeObserver(()=>{this.resize()}),this.last.observeResize.observe(this.box),i.finish(),setTimeout(()=>{o.last.events={resizeStart:l,mouseMove:n,mouseUp:r},this.resize()},0),Date.now()-t}function l(e,t){o.box&&(t=t||window.event,query(document).off("mousemove",o.last.events.mouseMove).on("mousemove",o.last.events.mouseMove),query(document).off("mouseup",o.last.events.mouseUp).on("mouseup",o.last.events.mouseUp),o.last.resize={type:e,x:t.screenX,y:t.screenY,diff_x:0,diff_y:0,value:0},w2panels.forEach(e=>{var t=query(o.el(e)).find(".w2ui-lock");0{var t=query(o.el(e)).find(".w2ui-lock");"yes"==t.data("locked")?t.removeData("locked"):o.unlock(e)}),0!==o.last.diff_x||0!==o.last.resize.diff_y){var s=o.get("top"),l=o.get("bottom"),r=o.get(o.last.resize.type),i=w2utils.getSize(query(o.box),"width"),n=w2utils.getSize(query(o.box),"height"),a=String(r.size);let e,t;switch(o.last.resize.type){case"top":e=parseInt(r.sizeCalculated)+o.last.resize.diff_y,t=0;break;case"bottom":e=parseInt(r.sizeCalculated)-o.last.resize.diff_y,t=0;break;case"preview":e=parseInt(r.sizeCalculated)-o.last.resize.diff_y,t=(s&&!s.hidden?s.sizeCalculated:0)+(l&&!l.hidden?l.sizeCalculated:0);break;case"left":e=parseInt(r.sizeCalculated)+o.last.resize.diff_x,t=0;break;case"right":e=parseInt(r.sizeCalculated)-o.last.resize.diff_x,t=0}"%"==a.substr(a.length-1)?r.size=Math.floor(100*e/("left"==r.type||"right"==r.type?i:n-t)*100)/100+"%":"-"==String(r.size).substr(0,1)?r.size=parseInt(r.size)-r.sizeCalculated+e:r.size=e,o.resize()}query(o.box).find("#layout_"+o.name+"_resizer_"+o.last.resize.type).removeClass("active"),delete o.last.resize}}function n(i){if(o.box&&(i=i||window.event,null!=o.last.resize)){var s=o.get(o.last.resize.type),l=o.last.resize,r=o.trigger("resizing",{target:o.name,object:s,originalEvent:i,panel:l?l.type:"all",diff_x:l?l.diff_x:0,diff_y:l?l.diff_y:0});if(!0!==r.isCancelled){var n=query(o.box).find("#layout_"+o.name+"_resizer_"+l.type);let e=i.screenX-l.x,t=i.screenY-l.y;var a=o.get("main");switch(n.hasClass("active")||n.addClass("active"),l.type){case"left":s.minSize-e>s.width&&(e=s.minSize-s.width),s.maxSize&&s.width+e>s.maxSize&&(e=s.maxSize-s.width),a.minSize+e>a.width&&(e=a.width-a.minSize);break;case"right":s.minSize+e>s.width&&(e=s.width-s.minSize),s.maxSize&&s.width-e>s.maxSize&&(e=s.width-s.maxSize),a.minSize-e>a.width&&(e=a.minSize-a.width);break;case"top":s.minSize-t>s.height&&(t=s.minSize-s.height),s.maxSize&&s.height+t>s.maxSize&&(t=s.maxSize-s.height),a.minSize+t>a.height&&(t=a.height-a.minSize);break;case"preview":case"bottom":s.minSize+t>s.height&&(t=s.height-s.minSize),s.maxSize&&s.height-t>s.maxSize&&(t=s.height-s.maxSize),a.minSize-t>a.height&&(t=a.minSize-a.height)}switch(l.diff_x=e,l.diff_y=t,l.type){case"top":case"preview":case"bottom":(l.diff_x=0) .w2ui-panel-content")[0],setTimeout(()=>{0 .w2ui-panel-content").length&&(query(l.box).find(t+"> .w2ui-panel-content").removeClass().removeAttr("name").addClass("w2ui-panel-content").css("overflow",e.overflow)[0].style.cssText+=";"+e.style),e.html&&"function"==typeof e.html.render&&e.html.render()},1)):0 .w2ui-panel-content").length&&(query(l.box).find(t+"> .w2ui-panel-content").removeClass().removeAttr("name").addClass("w2ui-panel-content").html(e.html).css("overflow",e.overflow)[0].style.cssText+=";"+e.style);let i=query(l.box).find(t+"> 
.w2ui-panel-tabs");e.show.tabs?0===i.find("[name="+e.tabs.name+"]").length&&null!=e.tabs?e.tabs.render(i.get(0)):e.tabs.refresh():i.html("").removeClass("w2ui-tabs").hide(),i=query(l.box).find(t+"> .w2ui-panel-toolbar"),e.show.toolbar?0===i.find("[name="+e.toolbar.name+"]").length&&null!=e.toolbar?e.toolbar.render(i.get(0)):e.toolbar.refresh():i.html("").removeClass("w2ui-toolbar").hide(),i=query(l.box).find(t+"> .w2ui-panel-title"),e.title?i.html(e.title).show():i.html("").hide()}else{if(0===query(l.box).find("#layout_"+l.name+"_panel_main").length)return void l.render();l.resize();for(let e=0;e div").css({width:o+"px",height:h+"px"});let i=this;var d=this.get("main"),u=this.get("preview"),c=this.get("left"),p=this.get("right"),f=this.get("top"),m=this.get("bottom"),g=null!=u&&!0!==u.hidden,y=null!=c&&!0!==c.hidden,w=null!=p&&!0!==p.hidden,b=null!=f&&!0!==f.hidden,v=null!=m&&!0!==m.hidden;let e,t,s,l;for(let e=0;ethis.padding?this.resizer:this.padding,query(this.box).find("#layout_"+this.name+"_resizer_top").css({display:"block",left:e+"px",top:t+"px",width:s+"px",height:l+"px",cursor:"ns-resize"}).off("mousedown").on("mousedown",function(e){var t=i.trigger("resizerClick",{target:"top",originalEvent:e});if(!0!==t.isCancelled)return w2ui[i.name].last.events.resizeStart("top",e),t.finish(),!1}))):(query(this.box).find("#layout_"+this.name+"_panel_top").hide(),query(this.box).find("#layout_"+this.name+"_resizer_top").hide()),null!=c&&!0!==c.hidden?(e=0,t=0+(b?f.sizeCalculated+this.padding:0),s=c.sizeCalculated,l=h-(b?f.sizeCalculated+this.padding:0)-(v?m.sizeCalculated+this.padding:0),query(this.box).find("#layout_"+this.name+"_panel_left").css({display:"block",left:e+"px",top:t+"px",width:s+"px",height:l+"px"}),c.width=s,c.height=l,c.resizable&&(e=c.sizeCalculated-(0===this.padding?this.resizer:0),s=this.resizer>this.padding?this.resizer:this.padding,query(this.box).find("#layout_"+this.name+"_resizer_left").css({display:"block",left:e+"px",top:t+"px",width:s+"px",height:l+"px",cursor:"ew-resize"}).off("mousedown").on("mousedown",function(e){var t=i.trigger("resizerClick",{target:"left",originalEvent:e});if(!0!==t.isCancelled)return w2ui[i.name].last.events.resizeStart("left",e),t.finish(),!1}))):(query(this.box).find("#layout_"+this.name+"_panel_left").hide(),query(this.box).find("#layout_"+this.name+"_resizer_left").hide()),null!=p&&!0!==p.hidden?(e=o-p.sizeCalculated,t=0+(b?f.sizeCalculated+this.padding:0),s=p.sizeCalculated,l=h-(b?f.sizeCalculated+this.padding:0)-(v?m.sizeCalculated+this.padding:0),query(this.box).find("#layout_"+this.name+"_panel_right").css({display:"block",left:e+"px",top:t+"px",width:s+"px",height:l+"px"}),p.width=s,p.height=l,p.resizable&&(e-=this.padding,s=this.resizer>this.padding?this.resizer:this.padding,query(this.box).find("#layout_"+this.name+"_resizer_right").css({display:"block",left:e+"px",top:t+"px",width:s+"px",height:l+"px",cursor:"ew-resize"}).off("mousedown").on("mousedown",function(e){var t=i.trigger("resizerClick",{target:"right",originalEvent:e});if(!0!==t.isCancelled)return 
w2ui[i.name].last.events.resizeStart("right",e),t.finish(),!1}))):(query(this.box).find("#layout_"+this.name+"_panel_right").hide(),query(this.box).find("#layout_"+this.name+"_resizer_right").hide()),null!=m&&!0!==m.hidden?(e=0,t=h-m.sizeCalculated,s=o,l=m.sizeCalculated,query(this.box).find("#layout_"+this.name+"_panel_bottom").css({display:"block",left:e+"px",top:t+"px",width:s+"px",height:l+"px"}),m.width=s,m.height=l,m.resizable&&(t-=0===this.padding?0:this.padding,l=this.resizer>this.padding?this.resizer:this.padding,query(this.box).find("#layout_"+this.name+"_resizer_bottom").css({display:"block",left:e+"px",top:t+"px",width:s+"px",height:l+"px",cursor:"ns-resize"}).off("mousedown").on("mousedown",function(e){var t=i.trigger("resizerClick",{target:"bottom",originalEvent:e});if(!0!==t.isCancelled)return w2ui[i.name].last.events.resizeStart("bottom",e),t.finish(),!1}))):(query(this.box).find("#layout_"+this.name+"_panel_bottom").hide(),query(this.box).find("#layout_"+this.name+"_resizer_bottom").hide()),e=0+(y?c.sizeCalculated+this.padding:0),t=0+(b?f.sizeCalculated+this.padding:0),s=o-(y?c.sizeCalculated+this.padding:0)-(w?p.sizeCalculated+this.padding:0),l=h-(b?f.sizeCalculated+this.padding:0)-(v?m.sizeCalculated+this.padding:0)-(g?u.sizeCalculated+this.padding:0),query(this.box).find("#layout_"+this.name+"_panel_main").css({display:"block",left:e+"px",top:t+"px",width:s+"px",height:l+"px"}),d.width=s,d.height=l,null!=u&&!0!==u.hidden?(e=0+(y?c.sizeCalculated+this.padding:0),t=h-(v?m.sizeCalculated+this.padding:0)-u.sizeCalculated,s=o-(y?c.sizeCalculated+this.padding:0)-(w?p.sizeCalculated+this.padding:0),l=u.sizeCalculated,query(this.box).find("#layout_"+this.name+"_panel_preview").css({display:"block",left:e+"px",top:t+"px",width:s+"px",height:l+"px"}),u.width=s,u.height=l,u.resizable&&(t-=0===this.padding?0:this.padding,l=this.resizer>this.padding?this.resizer:this.padding,query(this.box).find("#layout_"+this.name+"_resizer_preview").css({display:"block",left:e+"px",top:t+"px",width:s+"px",height:l+"px",cursor:"ns-resize"}).off("mousedown").on("mousedown",function(e){var t=i.trigger("resizerClick",{target:"preview",originalEvent:e});if(!0!==t.isCancelled)return w2ui[i.name].last.events.resizeStart("preview",e),t.finish(),!1}))):(query(this.box).find("#layout_"+this.name+"_panel_preview").hide(),query(this.box).find("#layout_"+this.name+"_resizer_preview").hide());for(let t=0;t .w2ui-panel-";let e=0;q&&(q.title&&(_=query(this.box).find(C+"title").css({top:e+"px",display:"block"}),e+=w2utils.getSize(_,"height")),q.show.tabs&&(_=query(this.box).find(C+"tabs").css({top:e+"px",display:"block"}),e+=w2utils.getSize(_,"height")),q.show.toolbar&&(q=query(this.box).find(C+"toolbar").css({top:e+"px",display:"block"}),e+=w2utils.getSize(q,"height"))),query(this.box).find(C+"content").css({display:"block"}).css({top:e+"px"})}return a.finish(),Date.now()-r}}destroy(){var e=this.trigger("destroy",{target:this.name});if(!0!==e.isCancelled)return null!=w2ui[this.name]&&(0'},add:{type:"button",id:"w2ui-add",text:"Add New",tooltip:"Add new record",icon:"w2ui-icon-plus"},edit:{type:"button",id:"w2ui-edit",text:"Edit",tooltip:"Edit selected record",icon:"w2ui-icon-pencil",batch:1,disabled:!0},delete:{type:"button",id:"w2ui-delete",text:"Delete",tooltip:"Delete selected records",icon:"w2ui-icon-cross",batch:!0,disabled:!0},save:{type:"button",id:"w2ui-save",text:"Save",tooltip:"Save changed 
records",icon:"w2ui-icon-check"}},this.operators={text:["is","begins","contains","ends"],number:["=","between",">","<",">=","<="],date:["is",{oper:"less",text:"before"},{oper:"more",text:"since"},"between"],list:["is"],hex:["is","between"],color:["is","begins","contains","ends"],enum:["in","not in"]},this.defaultOperator={text:"begins",number:"=",date:"is",list:"is",enum:"in",hex:"begins",color:"begins"},this.operatorsMap={text:"text",int:"number",float:"number",money:"number",currency:"number",percent:"number",hex:"hex",alphanumeric:"text",color:"color",date:"date",time:"date",datetime:"date",list:"list",combo:"text",enum:"enum",file:"enum",select:"list",radio:"list",checkbox:"list",toggle:"list"},this.onAdd=null,this.onEdit=null,this.onRequest=null,this.onLoad=null,this.onDelete=null,this.onSave=null,this.onSelect=null,this.onClick=null,this.onDblClick=null,this.onContextMenu=null,this.onContextMenuClick=null,this.onColumnClick=null,this.onColumnDblClick=null,this.onColumnResize=null,this.onColumnAutoResize=null,this.onSort=null,this.onSearch=null,this.onSearchOpen=null,this.onChange=null,this.onRestore=null,this.onExpand=null,this.onCollapse=null,this.onError=null,this.onKeydown=null,this.onToolbar=null,this.onColumnOnOff=null,this.onCopy=null,this.onPaste=null,this.onSelectionExtend=null,this.onEditField=null,this.onRender=null,this.onRefresh=null,this.onReload=null,this.onResize=null,this.onDestroy=null,this.onStateSave=null,this.onStateRestore=null,this.onFocus=null,this.onBlur=null,this.onReorderRow=null,this.onSearchSave=null,this.onSearchRemove=null,this.onSearchSelect=null,this.onColumnSelect=null,this.onColumnDragStart=null,this.onColumnDragEnd=null,this.onResizerDblClick=null,this.onMouseEnter=null,this.onMouseLeave=null,w2utils.extend(this,e),Array.isArray(this.records)){let i=[];this.records.forEach((e,t)=>{null!=e[this.recid]&&(e.recid=e[this.recid]),null==e.recid&&console.log("ERROR: Cannot add records without recid. 
(obj: "+this.name+")"),e.w2ui&&!0===e.w2ui.summary&&(this.summary.push(e),i.push(t))}),i.sort();for(let e=i.length-1;0<=e;e--)this.records.splice(i[e],1)}Array.isArray(this.columns)&&this.columns.forEach((i,e)=>{i=w2utils.extend({},this.colTemplate,i);e=(this.columns[e]=i).searchable;if(null!=e&&!1!==e&&null==this.getSearch(i.field))if(w2utils.isPlainObject(e))this.addSearch(w2utils.extend({field:i.field,label:i.text,type:"text"},e));else{let e=i.searchable,t="";!0===i.searchable&&(e="text",t='size="20"'),this.addSearch({field:i.field,label:i.text,type:e,attr:t})}}),Array.isArray(this.defaultSearches)&&this.defaultSearches.forEach((e,t)=>{e.id="default-"+t,e.icon??="w2ui-icon-search"});e=this.cache("searches");Array.isArray(e)&&e.forEach(e=>{this.savedSearches.push({id:e.id??"none",text:e.text??"none",icon:"w2ui-icon-search",remove:!0,logic:e.logic??"AND",data:e.data??[]})}),"string"==typeof this.box&&(this.box=query(this.box).get(0)),this.box&&this.render(this.box)}add(t,i){Array.isArray(t)||(t=[t]);let s=0;for(let e=0;ethis.records.length&&(a=this.records.length);for(let i=n;i{this.columns.forEach(i=>{if(i.field==e){let t=w2utils.clone(s);Object.keys(t).forEach(e=>{"function"==typeof t[e]&&(t[e]=t[e](i)),i[e]!=t[e]&&l++}),w2utils.extend(i,t)}})}),0{if(!(e.w2ui&&null!=e.w2ui.parent_recid||t.w2ui&&null!=t.w2ui.parent_recid))return o(e,t);var i=n(e),s=n(t);for(let e=0;es.length?1:i.length{this.status(w2utils.lang("Sorting took ${count} seconds",{count:e/1e3}))},10),e;function n(e){var t;return e.w2ui&&null!=e.w2ui.parent_recid?e.w2ui._path||((t=a.get(e.w2ui.parent_recid))?n(t).concat(e):(console.log("ERROR: no parent record: "+e.w2ui.parent_recid),[e])):[e]}function o(s,l){if(s===l)return 0;for(let i=0;it.constructor.name?s:-s;e&&"object"==typeof e&&(e=e.valueOf()),t&&"object"==typeof t&&(t=t.valueOf());var r={}.toString;switch(e&&"object"==typeof e&&e.toString!=r&&(e=String(e)),t&&"object"==typeof t&&t.toString!=r&&(t=String(t)),"string"==typeof e&&(e=e.toLowerCase().trim()),"string"==typeof t&&(t=t.toLowerCase().trim()),l){case"natural":l=w2utils.naturalCompare;break;case"i18n":l=w2utils.i18nCompare}return"function"==typeof l?l(e,t)*s:t=parseFloat(a)&&parseFloat(c.parseField(l,s.field))<=parseFloat(o)&&r++:"date"==s.type?(h=c.parseField(l,s.field+"_")instanceof Date?c.parseField(l,s.field+"_"):c.parseField(l,s.field),n=w2utils.isDate(h,w2utils.settings.dateFormat,!0),a=w2utils.isDate(a,w2utils.settings.dateFormat,!0),null!=(o=w2utils.isDate(o,w2utils.settings.dateFormat,!0))&&(o=new Date(o.getTime()+864e5)),n>=a&&n=a&&n=a&&n=":d=!0;case">":case"more":-1!=["int","float","money","currency","percent"].indexOf(s.type)?(n=parseFloat(c.parseField(l,s.field)),a=parseFloat(i.value),(n>a||d&&n===a)&&r++):"date"==s.type?(h=c.parseField(l,s.field+"_")instanceof Date?c.parseField(l,s.field+"_"):c.parseField(l,s.field),n=w2utils.isDate(h,w2utils.settings.dateFormat,!0),a=w2utils.isDate(a,w2utils.settings.dateFormat,!0),(n>a||d&&n===a)&&r++):"time"==s.type?(h=c.parseField(l,s.field+"_")instanceof Date?c.parseField(l,s.field+"_"):c.parseField(l,s.field),n=w2utils.formatTime(h,"hh24:mi"),a=w2utils.formatTime(a,"hh24:mi"),(n>a||d&&n===a)&&r++):"datetime"==s.type&&(h=c.parseField(l,s.field+"_")instanceof 
Date?c.parseField(l,s.field+"_"):c.parseField(l,s.field),n=w2utils.formatDateTime(h,"yyyy-mm-dd|hh24:mm:ss"),a=w2utils.formatDateTime(w2utils.isDateTime(a,w2utils.settings.datetimeFormat,!0),"yyyy-mm-dd|hh24:mm:ss"),n.length==a.length&&(n>a||d&&n===a)&&r++);break;case"in":h=i.value,-1===(h=i.svalue?i.svalue:h).indexOf(w2utils.isFloat(t)?parseFloat(t):t)&&-1===h.indexOf(n)||r++;break;case"not in":h=i.value,-1===(h=i.svalue?i.svalue:h).indexOf(w2utils.isFloat(t)?parseFloat(t):t)&&-1===h.indexOf(n)&&r++;break;case"begins":case"begins with":0===n.indexOf(a)&&r++;break;case"contains":0<=n.indexOf(a)&&r++;break;case"null":null==c.parseField(l,s.field)&&r++;break;case"not null":null!=c.parseField(l,s.field)&&r++;break;case"ends":case"ends with":let e=n.lastIndexOf(a);-1!==e&&e==n.length-a.length&&r++}}}if("OR"==c.last.logic&&0!==r||"AND"==c.last.logic&&r==c.searchData.length)return!0;if(l.w2ui&&l.w2ui.children&&!0!==l.w2ui.expanded)for(let t=0;tthis.records.length&&(i=this.records.length-s),0{this.status(w2utils.lang("Search took ${count} seconds",{count:e/1e3}))},10),e}}getRangeData(e,i){var s=this.get(e[0].recid,!0),l=this.get(e[1].recid,!0),r=e[0].column,n=e[1].column,a=[];if(r==n)for(let e=s;e<=l;e++){var t=this.records[e],o=t[this.columns[r].field]||null;a.push(!0!==i?o:{data:o,column:r,index:e,record:t})}else if(s==l){var h=this.records[s];for(let e=r;e<=n;e++){var d=h[this.columns[e].field]||null;a.push(!0!==i?d:{data:d,column:e,index:s,record:h})}}else for(let t=s;t<=l;t++){var u=this.records[t];a.push([]);for(let e=r;e<=n;e++){var c=u[this.columns[e].field];!0!==i?a[a.length-1].push(c):a[a.length-1].push({data:c,column:e,index:t,record:u})}}return a}addRange(s){let e=0,l,r;if("row"!=this.selectType){Array.isArray(s)||(s=[s]);for(let i=0;ithis.last.colStart&&(e=query(this.box).find("#grid_"+this.name+"_rec_"+w2utils.escapeId(u.recid)+' td[col="start"]')),u.columnthis.last.colEnd&&(t=query(this.box).find("#grid_"+this.name+"_rec_"+w2utils.escapeId(c.recid)+' td[col="end"]'),l='"end"');var p=parseInt(query(this.box).find("#grid_"+this.name+"_rec_top").next().attr("index")),f=parseInt(query(this.box).find("#grid_"+this.name+"_rec_bottom").prev().attr("index")),m=parseInt(query(this.box).find("#grid_"+this.name+"_frec_top").next().attr("index")),g=parseInt(query(this.box).find("#grid_"+this.name+"_frec_bottom").prev().attr("index"));0===e.length&&u.indexp&&(e=query(this.box).find("#grid_"+this.name+"_rec_top").next().find('td[col="'+u.column+'"]')),0===t.length&&c.index>f&&u.indexm&&(i=query(this.box).find("#grid_"+this.name+"_frec_top").next().find('td[col="'+u.column+'"]')),0===s.length&&c.index>g&&u.index'+("selection"==d.name?'
      ':"")+""),n=query(this.box).find("#grid_"+this.name+"_f"+d.name)):(n.attr("style",d.style),n.find(".w2ui-selection-resizer").show()),0===s.length&&(0===(s=query(this.box).find("#grid_"+this.name+"_frec_"+w2utils.escapeId(c.recid)+" td:last-child")).length&&(s=query(this.box).find("#grid_"+this.name+"_frec_bottom td:first-child")),n.css("border-right","0px"),n.find(".w2ui-selection-resizer").hide()),null!=u.recid&&null!=c.recid&&0'+("selection"==d.name?'
      ':"")+""),n=query(this.box).find("#grid_"+this.name+"_"+d.name)):n.attr("style",d.style),0===e.length&&0===(e=query(this.box).find("#grid_"+this.name+"_rec_"+w2utils.escapeId(u.recid)+" td:first-child")).length&&(e=query(this.box).find("#grid_"+this.name+"_rec_top td:first-child")),0!==s.length&&n.css("border-left","0px"),null!=u.recid&&null!=c.recid&&0{e=this.trigger("resizerDblClick",{target:this.name,originalEvent:e});!0!==e.isCancelled&&e.finish()});let a={target:this.name,originalRange:null,newRange:null};return Date.now()-e;function i(s){var l=r.last.move;if(l&&"expand"==l.type){l.divX=s.screenX-l.x,l.divY=s.screenY-l.y;let e,t,i=s.target;"TD"!=i.tagName.toUpperCase()&&(i=query(i).closest("td")[0]),null!=(t=null!=query(i).attr("col")?parseInt(query(i).attr("col")):t)&&(i=query(i).closest("tr")[0],e=r.records[query(i).attr("index")].recid,l.newRange[1].recid==e&&l.newRange[1].column==t||(s=w2utils.clone(l.newRange),l.newRange=[{recid:l.recid,column:l.column},{recid:e,column:t}],a.detail&&(a.detail.newRange=w2utils.clone(l.newRange),a.detail.originalRange=w2utils.clone(l.originalRange)),!0===(a=r.trigger("selectionExtend",a)).isCancelled?(l.newRange=s,a.detail.newRange=s):(r.removeRange("grid-selection-expand"),r.addRange({name:"grid-selection-expand",range:l.newRange,style:"background-color: rgba(100,100,100,0.1); border: 2px dotted rgba(100,100,100,0.5);"}))))}}function s(e){r.removeRange("grid-selection-expand"),delete r.last.move,query("body").off(".w2ui-"+r.name),a.finish&&a.finish()}}}select(){if(0===arguments.length)return 0;let s=0;var l=this.last.selection;this.multiSelect||this.selectNone(!0);let t=Array.from(arguments);Array.isArray(t[0])&&(t=t[0]);var e={target:this.name},e=(1==t.length?(e.multiple=!1,w2utils.isPlainObject(t[0])?e.clicked={recid:t[0].recid,column:t[0].column}:e.recid=t[0]):(e.multiple=!0,e.clicked={recids:t}),this.trigger("select",e));if(!0===e.isCancelled)return 0;if("row"==this.selectType)for(let e=0;e=this.last.range_start&&r+1<=this.last.range_end)&&(e=query(this.box).find("#grid_"+this.name+"_frec_"+w2utils.escapeId(i)),t=query(this.box).find("#grid_"+this.name+"_rec_"+w2utils.escapeId(i))),"row"==this.selectType&&-1==l.indexes.indexOf(r)&&(l.indexes.push(r),e&&t&&(e.addClass("w2ui-selected").find(".w2ui-col-number").addClass("w2ui-row-selected"),t.addClass("w2ui-selected").find(".w2ui-col-number").addClass("w2ui-row-selected"),e.find(".w2ui-grid-select-check").prop("checked",!0)),s++)}}else{var n={};for(let e=0;e=this.last.range_start&&u+1<=this.last.range_end&&(t=query(this.box).find("#grid_"+this.name+"_rec_"+w2utils.escapeId(h)),i=query(this.box).find("#grid_"+this.name+"_frec_"+w2utils.escapeId(h)));var c=l.columns[u]||[];-1==l.indexes.indexOf(u)&&l.indexes.push(u);for(let e=0;ee-t);for(let e=0;ee-t);var f=0 td[col="${h}"]`).removeClass("w2ui-selected w2ui-inactive"),query(this.box).find(`#grid_${this.name}_frec_${w2utils.escapeId(r)} > td[col="${h}"]`).removeClass("w2ui-selected w2ui-inactive");let t=!1,i=!1;var d=this.getSelection();for(let e=0;e{i(t,""),Array.isArray(t.items)&&t.items.forEach(e=>{i(e,t.id+":")})}),this.show.toolbarSave&&(0{this.initSearches(),this.last.search_opened=!0;let t=query(`#w2overlay-${this.name}-search-overlay`);t.data("gridName",this.name).off(".grid-search").on("click.grid-search",()=>{t.find("input, select").each(e=>{e=query(e).data("tooltipName");e&&e.forEach(e=>{w2tooltip.hide(e)})})}),w2utils.bindEvents(t.find("select, input, button"),this);var i=query(`#w2overlay-${this.name}-search-overlay 
*[rel=search]`);0{t.removeClass("checked"),this.last.search_opened=!1})}}}searchClose(){w2tooltip.hide(this.name+"-search-overlay")}searchFieldTooltip(e,t,i){var e=this.searches[e],s=this.searchData[t];let l=s.operator,r=("less"==(l="more"==l&&"date"==s.type?"since":l)&&"date"==s.type&&(l="before"),""),n=s.value;Array.isArray(s.value)?(s.value.forEach(e=>{r+=`${e.text||e}`}),"date"==s.type&&(r="",s.value.forEach(e=>{r+=`${w2utils.formatDate(e)}`}))):"date"==s.type&&(n=w2utils.formatDateTime(n)),w2tooltip.hide(this.name+"-search-props"),w2tooltip.show({name:this.name+"-search-props",anchor:i,class:"w2ui-white",hideOn:"doc-click",html:` -
      - ${e.label} - ${w2utils.lang(l)} - ${Array.isArray(s.value)?""+r:`${n}`} -
      - -
      -
      `}).then(e=>{query(e.detail.overlay.box).find("#remove").on("click",()=>{this.searchData.splice(""+t,1),this.reload(),this.localSearch(),w2tooltip.hide(this.name+"-search-props")})})}searchSuggest(e,t,i){clearTimeout(this.last.kbd_timer),clearTimeout(this.last.overlay_timer),this.searchShowFields(!0),this.searchClose(),!0===t?w2tooltip.hide(this.name+"-search-suggest"):0${t}`:t}}).select(e=>{var t=this.trigger("searchSelect",{target:this.name,index:e.detail.index,item:e.detail.item});!0===t.isCancelled?e.preventDefault():(e.detail.overlay.hide(),this.last.logic=e.detail.item.logic||"AND",this.last.search="",this.last.label="[Multiple Fields]",this.searchData=w2utils.clone(e.detail.item.data),this.searchSelected=w2utils.clone(e.detail.item,{exclude:["icon","remove"]}),this.reload(),t.finish())}).remove(e=>{let i=e.detail.item,s=this.trigger("searchRemove",{target:this.name,index:e.detail.index,item:i});!0===s.isCancelled?e.preventDefault():(e.detail.overlay.hide(),this.confirm(w2utils.lang('Do you want to delete search "${item}"?',{item:i.text})).yes(e=>{var t=this.savedSearches.findIndex(e=>e.id==i.id);-1!==t&&this.savedSearches.splice(t,1),this.cacheSave("searches",this.savedSearches.map(e=>w2utils.clone(e,{exclude:["remove","icon"]}))),e.detail.self.close(),s.finish()}).no(e=>{e.detail.self.close()}))})):this.last.overlay_timer=setTimeout(()=>{this.searchSuggest(!0)},100))}searchSave(){let e="",t=(this.searchSelected&&(e=this.searchSelected.text),this.savedSearches.findIndex(e=>e.id==this.searchSelected?.id)),s=this.trigger("searchSave",{target:this.name,saveLocalStorage:!0});!0!==s.isCancelled&&this.message({width:350,height:150,body:``,buttons:` - - - `}).open(async i=>{query(i.detail.box).find("input, button").eq(0).val(e),await i.complete,query(i.detail.box).find("#grid-search-cancel").on("click",()=>{this.message()}),query(i.detail.box).find("#grid-search-save").on("click",()=>{var e=query(i.detail.box).find(".w2ui-message .search-name").val();this.searchSelected&&-1!=t?Object.assign(this.savedSearches[t],{id:e,text:e,logic:this.last.logic,data:w2utils.clone(this.searchData)}):this.savedSearches.push({id:e,text:e,icon:"w2ui-icon-search",remove:!0,logic:this.last.logic,data:this.searchData}),this.cacheSave("searches",this.savedSearches.map(e=>w2utils.clone(e,{exclude:["remove","icon"]}))),this.message(),(this.searchSelected?(this.searchSelected.text=e,query(this.box).find(`#grid_${this.name}_search_name .name-text`)):(this.searchSelected={text:e,logic:this.last.logic,data:w2utils.clone(this.searchData)},query(i.detail.box).find(`#grid_${this.name}_search_all`).val(" ").prop("readOnly",!0),query(i.detail.box).find(`#grid_${this.name}_search_name`).show().find(".name-text"))).html(e),s.finish({name:e})}),query(i.detail.box).find("input, button").off(".message").on("keydown.message",e=>{var t=String(query(i.detail.box).find(".w2ui-message-body input").val()).trim();13==e.keyCode&&""!=t&&query(i.detail.box).find("#grid-search-save").trigger("click"),27==e.keyCode&&this.message()}).eq(0).on("input.message",e=>{var t=query(i.detail.box).closest(".w2ui-message").find("#grid-search-save");""===String(query(i.detail.box).val()).trim()?t.prop("disabled",!0):t.prop("disabled",!1)}).get(0).focus()})}cache(e){if(w2utils.hasLocalStorage&&this.useLocalStorage)try{var t=JSON.parse(localStorage.w2ui||"{}");return t[this.stateId||this.name]??={},t[this.stateId||this.name][e]}catch(e){}return null}cacheSave(e,t){if(w2utils.hasLocalStorage&&this.useLocalStorage)try{var 
i=JSON.parse(localStorage.w2ui||"{}");return i[this.stateId||this.name]??={},i[this.stateId||this.name][e]=t,localStorage.w2ui=JSON.stringify(i),!0}catch(e){delete localStorage.w2ui}return!1}searchReset(e){var t=[];let i=!1;for(let e=0;e=this.searches.length?(this.last.field="",this.last.label=""):(this.last.field=this.searches[e].field,this.last.label=this.searches[e].label)}this.last.multi=!1,this.last.fetch.offset=0,this.last.scrollTop=0,this.last.scrollLeft=0,this.last.selection.indexes=[],this.last.selection.columns={},this.searchClose();l=l.val("").get(0);l?._w2field&&l._w2field.reset(),e||this.reload(),s.finish()}}searchShowFields(e){if(!0===e)w2tooltip.hide(this.name+"-search-fields");else{var l=[];for(let s=-1;s",e),e.label=e.caption),l.push({id:e.field,text:w2utils.lang(e.label),search:e,tooltip:i,disabled:t,checked:e.field==this.last.field})}w2menu.show({type:"radio",name:this.name+"-search-fields",anchor:query(this.box).find("#grid_"+this.name+"_search_name").parent().find(".w2ui-search-down").get(0),items:l,align:"none",hideOn:["doc-click","select"]}).select(e=>{this.searchInitInput(e.detail.item.search.field)})}}searchInitInput(e,t){let i;var s=query(this.box).find("#grid_"+this.name+"_search_all");if("all"==e)i={field:"all",label:w2utils.lang("All Fields")};else if(null==(i=this.getSearch(e)))return;""!=this.last.search?(this.last.label=i.label,this.search(i.field,this.last.search)):(this.last.field=i.field,this.last.label=i.label),s.attr("placeholder",w2utils.lang("Search")+" "+w2utils.lang(i.label||i.caption||i.field,!0))}clear(e){this.total=0,this.records=[],this.summary=[],this.last.fetch.offset=0,this.last.idCache={},this.last.selection={indexes:[],columns:{}},this.reset(!0),e||this.refresh()}reset(e){this.last.scrollTop=0,this.last.scrollLeft=0,this.last.range_start=null,this.last.range_end=null,query(this.box).find(`#grid_${this.name}_records`).prop("scrollTop",0),e||this.refresh()}skip(e,t){this.url?.get??this.url?(this.offset=parseInt(e),this.offset>this.total&&(this.offset=this.total-this.limit),(this.offset<0||!w2utils.isInt(this.offset))&&(this.offset=0),this.clear(!0),this.reload(t)):console.log("ERROR: grid.skip() can only be called when you have remote data source.")}load(e,t){return null==e?(console.log('ERROR: You need to provide url argument when calling .load() method of "'+this.name+'" object.'),new Promise((e,t)=>{t()})):(this.clear(!0),this.request("load",{},e,t))}reload(e){let t=this;var i=this.url?.get??this.url;return t.selectionSave(),i?this.load(i,()=>{t.selectionRestore(),"function"==typeof e&&e()}):(this.reset(!0),this.localSearch(),this.selectionRestore(),"function"==typeof e&&e({status:"success"}),new Promise(e=>{e()}))}prepareParams(i,e){var t=this.dataType??w2utils.settings.dataType;let s=e.body;switch(t){case"HTTPJSON":s={request:s},["PUT","DELETE"].includes(e.method)&&(e.method="POST"),l();break;case"HTTP":["PUT","DELETE"].includes(e.method)&&(e.method="POST"),l();break;case"RESTFULL":["PUT","DELETE"].includes(e.method)?e.headers["Content-Type"]="application/json":l();break;case"JSON":"GET"==e.method?(s={request:s},l()):(e.headers["Content-Type"]="application/json",e.method="POST")}return e.body="string"==typeof e.body?e.body:JSON.stringify(e.body),e;function l(){Object.keys(s).forEach(e=>{let t=s[e];"object"==typeof t&&(t=JSON.stringify(t)),i.searchParams.append(e,t)}),delete e.body}}request(i,e,t,s){let l=this,r,n;var a=new Promise((e,t)=>{r=e,n=t});if(null==e&&(e={}),!(t=t||this.url))return new 
Promise((e,t)=>{t()});w2utils.isInt(this.offset)||(this.offset=0),w2utils.isInt(this.last.fetch.offset)||(this.last.fetch.offset=0);let o;var h={limit:this.limit,offset:parseInt(this.offset)+parseInt(this.last.fetch.offset),searchLogic:this.last.logic,search:this.searchData.map(e=>{e=w2utils.clone(e);return this.searchMap&&this.searchMap[e.field]&&(e.field=this.searchMap[e.field]),e}),sort:this.sortData.map(e=>{e=w2utils.clone(e);return this.sortMap&&this.sortMap[e.field]&&(e.field=this.sortMap[e.field]),e})};if(0===this.searchData.length&&(delete h.search,delete h.searchLogic),0===this.sortData.length&&delete h.sort,w2utils.extend(h,this.postData),w2utils.extend(h,e),"delete"!=i&&"save"!=i||(delete h.limit,delete h.offset,"delete"==(h.action=i)&&(h[this.recid||"recid"]=this.getSelection())),"load"==i){if(!0===(o=this.trigger("request",{target:this.name,url:t,postData:h,httpMethod:"GET",httpHeaders:this.httpHeaders})).isCancelled)return new Promise((e,t)=>{t()})}else o={detail:{url:t,postData:h,httpMethod:"save"==i?"PUT":"DELETE",httpHeaders:this.httpHeaders}};if(0===this.last.fetch.offset&&this.lock(w2utils.lang(this.msgRefresh),!0),this.last.fetch.controller)try{this.last.fetch.controller.abort()}catch(e){}switch(t=o.detail.url,i){case"save":t?.save&&(t=t.save);break;case"delete":t?.remove&&(t=t.remove);break;default:t=t?.get??t}if(0{null!=e&&(200!=e?.status?u(e??{}):(l.unlock(),e.json().catch(u).then(e=>{this.requestComplete(e,i,s,r,n)})))}),"load"==i&&o.finish(),a;function u(e){var t;"AbortError"!==e?.name&&(l.unlock(),!0!==(t=l.trigger("error",{response:e,lastFetch:l.last.fetch})).isCancelled&&(e.status&&200!=e.status?l.error(e.status+": "+e.statusText):(console.log("ERROR: Server communication failed.","\n EXPECTED:",{total:5,records:[{recid:1,field:"value"}]},"\n OR:",{error:!0,message:"error message"}),l.requestComplete({error:!0,message:"HTTP Request error",response:e},i,s,r,n)),t.finish()))}}requestComplete(e,t,i,s,l){let r=e.error??!1,n=(null==e.error&&"error"===e.status&&(r=!0),this.last.fetch.response=(Date.now()-this.last.fetch.start)/1e3,setTimeout(()=>{this.show.statusResponse&&this.status(w2utils.lang("Server Response ${count} seconds",{count:this.last.fetch.response}))},10),this.last.pull_more=!1,this.last.pull_refresh=!0,"load");"save"==this.last.fetch.action&&(n="save"),"delete"==this.last.fetch.action&&(n="delete");var a=this.trigger(n,{target:this.name,error:r,data:e,lastFetch:this.last.fetch});if(!0===a.isCancelled)l();else{if(r)e={error:r,data:e,message:w2utils.lang(this.msgHTTPError)},this.error(w2utils.lang(this.msgHTTPError)),l(e);else if("function"==typeof this.parser?"object"!=typeof(e=this.parser(e))&&console.log("ERROR: Your parser did not return proper object"):null==e?e={error:!0,message:w2utils.lang(this.msgNotJSON)}:Array.isArray(e)&&(e={error:r,records:e,total:e.length}),e.error)this.error(e.message);else if("load"==t){if(null==e.total&&(e.total=-1),null==e.records&&(e.records=[]),e.records.length==this.limit?(l=this.records.length+e.records.length,this.last.fetch.hasMore=l!=this.total):(this.last.fetch.hasMore=!1,this.total=this.offset+this.last.fetch.offset+e.records.length),this.last.fetch.hasMore||query(this.box).find("#grid_"+this.name+"_rec_more, #grid_"+this.name+"_frec_more").hide(),0===this.last.fetch.offset)this.records=[],this.summary=[];else if(-1!=e.total&&parseInt(e.total)!=parseInt(this.total)){let e=this;return this.message(w2utils.lang(this.msgNeedReload)).ok(()=>{delete e.last.fetch.offset,e.reload()}),new 
Promise(e=>{e()})}w2utils.isInt(e.total)&&(this.total=parseInt(e.total)),e.records&&e.records.forEach(e=>{this.recid&&(e.recid=this.parseField(e,this.recid)),null==e.recid&&(e.recid="recid-"+this.records.length),(e.w2ui&&!0===e.w2ui.summary?this.summary:this.records).push(e)}),e.summary&&(this.summary=[],e.summary.forEach(e=>{this.recid&&(e.recid=this.parseField(e,this.recid)),null==e.recid&&(e.recid="recid-"+this.summary.length),this.summary.push(e)}))}else if("delete"==t)return this.reset(),this.reload();(this.url?.get??this.url)||(this.localSort(),this.localSearch()),this.total=parseInt(this.total),0===this.last.fetch.offset?this.refresh():(this.scroll(),this.resize()),"function"==typeof i&&i(e),s(e),a.finish(),this.last.fetch.loaded=!0}}error(e){var t=this.trigger("error",{target:this.name,message:e});!0!==t.isCancelled&&(this.message(e),t.finish())}getChanges(t){var i=[];void 0===t&&(t=this.records);for(let e=0;e{e.error||this.mergeChanges(),s.finish(),"function"==typeof t&&t(e)}):(this.mergeChanges(),s.finish()))}editField(d,u,c,p){let f=this;if(!0===this.last.inEditMode)p&&13==p.keyCode?({index:m,column:g,value:y}=this.last._edit,this.editChange({type:"custom",value:y},m,g,p),this.editDone(m,g,p)):0<(y=query(this.box).find("div.w2ui-edit-box .w2ui-input")).length&&("DIV"==y.get(0).tagName?(y.text(y.text()+c),w2utils.setCursorPosition(y.get(0),y.text().length)):(y.val(y.val()+c),w2utils.setCursorPosition(y.get(0),y.val().length)));else{let o=this.get(d,!0),h=this.getCellEditable(o,u);if(h&&!["checkbox","check"].includes(h.type)){let n=this.records[o],a=this.columns[u];var m=!0===a.frozen?"_f":"_";if(-1!=["list","enum","file"].indexOf(h.type))console.log('ERROR: input types "list", "enum" and "file" are not supported in inline editing.');else{var g=this.trigger("editField",{target:this.name,recid:d,column:u,value:c,index:o,originalEvent:p});if(!0!==g.isCancelled){c=g.detail.value,this.last.inEditMode=!0,this.last.editColumn=u,this.last._edit={value:c,index:o,column:u,recid:d},this.selectNone(!0),this.select({recid:d,column:u});var y=query(this.box).find("#grid_"+this.name+m+"rec_"+w2utils.escapeId(d));let e=y.find('[col="'+u+'"] > div'),t=(this.last._edit.tr=y,this.last._edit.div=e,query(this.box).find("div.w2ui-edit-box").remove(),"row"!=this.selectType&&(query(this.box).find("#grid_"+this.name+m+"selection").attr("id","grid_"+this.name+"_editable").removeClass("w2ui-selection").addClass("w2ui-edit-box").prepend('
      ').find(".w2ui-selection-resizer").remove(),e=query(this.box).find("#grid_"+this.name+"_editable > div:first-child")),h.attr=h.attr??"",h.text=h.text??"",h.style=h.style??"",h.items=h.items??[],null!=n.w2ui?.changes?.[a.field]?w2utils.stripTags(n.w2ui.changes[a.field]):w2utils.stripTags(f.parseField(n,a.field))),i="object"!=typeof(t=null==t?"":t)?t:"",s=(null!=g.detail.prevValue&&(i=g.detail.prevValue),null!=c&&(t=c),null!=a.style?a.style+";":"");"string"==typeof a.render&&["number","int","float","money","percent","size"].includes(a.render.split(":")[0])&&(s+="text-align: right;"),0 div').get(0)),m=`font-family: ${p["font-family"]}; font-size: ${p["font-size"]};`;function w(e){try{var t=getComputedStyle(e),i="DIV"==e.tagName.toUpperCase()?e.innerText:e.value,s=query(f.box).find("#grid_"+f.name+"_editable").get(0),l=`font-family: ${t["font-family"]}; font-size: ${t["font-size"]}; white-space: no-wrap;`,r=w2utils.getStrWidth(i,l);r+20>s.clientWidth&&query(s).css("width",r+20+"px")}catch(e){}}"div"===h.type?(e.addClass("w2ui-editable").html(w2utils.stripSpaces(`
      -
      `+h.text)),(l=e.find("div.w2ui-input").get(0)).innerText="object"!=typeof t?t:"",null!=c?w2utils.setCursorPosition(l,l.innerText.length):w2utils.setCursorPosition(l,0,l.innerText.length)):(e.addClass("w2ui-editable").html(w2utils.stripSpaces(``+h.text)),l=e.find("input").get(0),"number"==h.type&&(t=w2utils.formatNumber(t)),"date"==h.type&&(t=w2utils.formatDate(w2utils.isDate(t,h.format,!0)||new Date,h.format)),l.value="object"!=typeof t?t:"",y=e=>{var t=this.last._edit?.escKey;let i=!1;var s=query(l).data("tooltipName");s&&null!=w2tooltip.get(s[0])?.selected&&(i=!0),!this.last.inEditMode||t||!r.includes(h.type)||e.detail.overlay.anchor?.id!=this.last._edit.input?.id&&"list"!=h.type||(this.editChange(),this.editDone(void 0,void 0,{keyCode:i?13:0}))},new w2field(w2utils.extend({},h,{el:l,selected:t,onSelect:y,onHide:y})),null==c&&l&&l.select()),Object.assign(this.last._edit,{input:l,edit:h}),query(l).off(".w2ui-editable").on("blur.w2ui-editable",e=>{var t,i;this.last.inEditMode&&(t=this.last._edit.edit.type,i=query(l).data("tooltipName"),r.includes(t)&&i||(this.editChange(l,o,u,e),this.editDone()))}).on("mousedown.w2ui-editable",e=>{e.stopPropagation()}).on("click.w2ui-editable",e=>{w.call(l,e)}).on("paste.w2ui-editable",e=>{e.preventDefault();e=e.clipboardData.getData("text/plain");document.execCommand("insertHTML",!1,e)}).on("keyup.w2ui-editable",e=>{w.call(l,e)}).on("keydown.w2ui-editable",i=>{switch(i.keyCode){case 8:"list"!=h.type||l._w2field||i.preventDefault();break;case 9:case 13:i.preventDefault();break;case 27:var e=query(l).data("tooltipName");e&&0{switch(i.keyCode){case 9:var e=i.shiftKey?f.prevCell(o,u,!0):f.nextCell(o,u,!0);null!=e&&(t=f.records[e.index].recid,this.editChange(l,o,u,i),this.editDone(o,u,i),"row"!=f.selectType?(f.selectNone(!0),f.select({recid:t,column:e.colIndex})):f.editField(t,e.colIndex,null,i),i.preventDefault&&i.preventDefault());break;case 13:{let e=!1;var t=query(l).data("tooltipName");t&&null!=w2tooltip.get(t[0]).selected&&(e=!0),t&&e||(this.editChange(l,o,u,i),this.editDone(o,u,i));break}case 27:{this.last._edit.escKey=!1;let e=f.parseField(n,a.field);null!=n.w2ui?.changes?.[a.field]&&(e=n.w2ui.changes[a.field]),null!=l._prevValue&&(e=l._prevValue),"DIV"==l.tagName?l.innerText=null!=e?e:"":l.value=null!=e?e:"",this.editDone(o,u,i),setTimeout(()=>{f.select({recid:d,column:u})},1);break}}w(l)},1)}),l&&(l._prevValue=i),setTimeout(()=>{this.last.inEditMode&&l&&(l.focus(),clearTimeout(this.last.kbd_timer),(l.resize=w)(l))},50),g.finish({input:l})}}}}}editChange(e,t,i,s){e=e??this.last._edit.input,t=t??this.last._edit.index,i=i??this.last._edit.column,s=s??{};var l=(t<0?this.summary:this.records)[t=t<0?-t-1:t],r=this.columns[i];let n="DIV"==e?.tagName?e.innerText:e.value;var a=e._w2field,o=(a&&("list"==a.type&&(n=a.selected),0!==Object.keys(n).length&&null!=n||(n=""),w2utils.isPlainObject(n)||(n=a.clean(n))),"checkbox"==e.type&&(l.w2ui&&!1===l.w2ui.editable&&(e.checked=!e.checked),n=e.checked),this.parseField(l,r.field)),h=l.w2ui&&l.w2ui.changes&&l.w2ui.changes.hasOwnProperty(r.field)?l.w2ui.changes[r.field]:o;let d={target:this.name,input:e,recid:l.recid,index:t,column:i,originalEvent:s,value:{new:n,previous:h,original:o}},u=(null!=s.target?._prevValue&&(d.value.previous=s.target._prevValue),0);for(;u<20;){if(u++,"object"!=typeof(n=d.value.new)&&String(o)!=String(n)||"object"==typeof n&&n&&n.id!=o&&("object"!=typeof 
o||null==o||n.id!=o.id)){if(!0!==(d=this.trigger("change",d)).isCancelled){if(n!==d.detail.value.new)continue;(""!==d.detail.value.new&&null!=d.detail.value.new||""!==h&&null!=h)&&(l.w2ui=l.w2ui??{},l.w2ui.changes=l.w2ui.changes??{},l.w2ui.changes[r.field]=d.detail.value.new),d.finish()}}else if(!0!==(d=this.trigger("restore",d)).isCancelled){if(n!==d.detail.value.new)continue;l.w2ui?.changes&&(delete l.w2ui.changes[r.field],0===Object.keys(l.w2ui.changes).length&&delete l.w2ui.changes),d.finish()}break}}editDone(t,i,s){if(t=t??this.last._edit.index,i=i??this.last._edit.column,s=s??{},this.advanceOnEdit&&13==s.keyCode){let e=s.shiftKey?this.prevRow(t,i,1):this.nextRow(t,i,1);null==e&&(e=t),setTimeout(()=>{"row"!=this.selectType?(this.selectNone(!0),this.select({recid:this.records[e].recid,column:i})):this.editField(this.records[e].recid,i,null,s)},1)}var e=t<0,l=query(this.last._edit.tr).find('[col="'+i+'"]'),r=this.records[t],n=this.columns[i];this.last.inEditMode=!1,this.last._edit=null,e||(null!=r.w2ui?.changes?.[n.field]?l.addClass("w2ui-changed"):l.removeClass("w2ui-changed"),l.replace(this.getCellHTML(t,i,e))),query(this.box).find("div.w2ui-edit-box").remove(),this.updateToolbar(),setTimeout(()=>{var e=query(this.box).find(`#grid_${this.name}_focus`).get(0);document.activeElement===e||this.last.inEditMode||e.focus()},10)}delete(e){var t=this.trigger("delete",{target:this.name,force:e});if(e&&this.message(),!0!==t.isCancelled){e=t.detail.force;var i=this.getSelection();if(0!==i.length)if(""==this.msgDelete||e){if("object"!=typeof this.url?this.url:this.url.remove)this.request("delete");else if("object"!=typeof i[0])this.selectNone(),this.remove.apply(this,i);else{for(let e=0;e{e.detail.self.close(),this.delete(!0)}).no(e=>{e.detail.self.close()})}}click(l,r){var n=Date.now();let a=null;if(!(1==this.last.cancelClick||r&&r.altKey))if("object"==typeof l&&null!==l&&(a=l.column,l=l.recid),null==r&&(r={}),n-parseInt(this.last.click_time)<350&&this.last.click_recid==l&&"click"==r.type)this.dblClick(l,r);else{this.last.bubbleEl&&(this.last.bubbleEl=null),this.last.click_time=n;n=this.last.click_recid;if(this.last.click_recid=l,null==a&&r.target){let e=r.target;"TD"!=e.tagName&&(e=query(e).closest("td")[0]),null!=query(e).attr("col")&&(a=parseInt(query(e).attr("col")))}var o=this.trigger("click",{target:this.name,recid:l,column:a,originalEvent:r});if(!0!==o.isCancelled){var h=this.getSelection(),d=(query(this.box).find("#grid_"+this.name+"_check_all").prop("checked",!1),this.get(l,!0)),u=[];this.last.sel_ind=d,this.last.sel_col=a,this.last.sel_recid=l,this.last.sel_type="click";let e,i,t,s;if(r.shiftKey&&0h[0].column?(t=h[0].column,a):(t=a,h[0].column);for(let e=t;e<=s;e++)u.push(e)}else e=this.get(n,!0),i=this.get(l,!0);var c=[],p=(e>i&&(n=e,e=i,i=n),this.url?.get?this.url.get:this.url);for(let t=e;t<=i;t++)if(!(0=this.records.length?this.selectNone():this.selectAll())}else if(!t.altKey||(l=this.getColumn(s))&&l.sortable&&this.sort(s,null,!(!t||!t.ctrlKey&&!t.metaKey)),"line-number"==e.detail.field)this.getSelection().length>=this.records.length?this.selectNone():this.selectAll();else{t.shiftKey||t.metaKey||t.ctrlKey||this.selectNone(!0);var l=this.getSelection(),s=this.getColumn(e.detail.field,!0),i=[],r=[];if(0!=l.length&&t.shiftKey){let t=s,i=l[0].column;t>i&&(t=l[0].column,i=s);for(let e=t;e<=i;e++)r.push(e)}else r.push(s);if(!0!==(e=this.trigger("columnSelect",{target:this.name,columns:r})).isCancelled){for(let e=0;e{var 
e=query(this.box).find(`#grid_${this.name}_focus`).get(0);e&&document.activeElement!=e&&e.focus()},10),e.finish()}blur(e){e=this.trigger("blur",{target:this.name,originalEvent:e});if(!0===e.isCancelled)return!1;this.hasFocus=!1,query(this.box).addClass("w2ui-inactive").find(".w2ui-selected").addClass("w2ui-inactive"),query(this.box).find(".w2ui-selection").addClass("w2ui-inactive"),e.finish()}keydown(c){let p=this,f="object"!=typeof this.url?this.url:this.url.get;if(!0===p.keyboard){var m=p.trigger("keydown",{target:p.name,originalEvent:c});if(!0!==m.isCancelled)if(0t&&p.last.sel_ind!=l?p.unselect(p.records[l].recid):p.select(p.records[t].recid);else if(p.last.sel_ind>t&&p.last.sel_ind!=l){t=l;var i=[];for(let e=0;e{var e=query(p.box).find("#grid_"+p.name+"_focus"),t=e.val();e.val(""),p.editField(n,a[0],t,c)},1)),d&&c.preventDefault&&c.preventDefault(),m.finish()}}}scrollIntoView(e,s,t,i){let l=this.records.length;if(0!==(l=0==this.searchData.length||this.url?l:this.last.searchIds.length)){if(null==e){var r=this.getSelection();if(0===r.length)return;w2utils.isPlainObject(r[0])?(e=r[0].index,s=r[0].column):e=this.get(r[0],!0)}var r=query(this.box).find(`#grid_${this.name}_records`),n=r[0].clientWidth,a=r[0].clientHeight,o=r[0].scrollTop,h=r[0].scrollLeft,d=this.last.searchIds.length;if(0{clearTimeout(this.last.kbd_timer),this.contextMenuClick(i,e)}),clearTimeout(this.last.kbd_timer)),l.preventDefault(),e.finish())}}contextMenuClick(e,t){e=this.trigger("contextMenuClick",{target:this.name,recid:e,originalEvent:t.detail.originalEvent,menuEvent:t,menuIndex:t.detail.index,menuItem:t.detail.item});!0!==e.isCancelled&&e.finish()}toggle(e){var t=this.get(e);if(null!=t)return t.w2ui=t.w2ui||{},!0===t.w2ui.expanded?this.collapse(e):this.expand(e)}expand(e,t){var i=this.get(e,!0);let s=this.records[i];s.w2ui=s.w2ui||{};var l=w2utils.escapeId(e),r=s.w2ui.children;let n;if(Array.isArray(r)){if(!0===s.w2ui.expanded||0===r.length)return!1;if(!0===(n=this.trigger("expand",{target:this.name,recid:e})).isCancelled)return!1;s.w2ui.expanded=!0,r.forEach(e=>{e.w2ui=e.w2ui||{},e.w2ui.parent_recid=s.recid,null==e.w2ui.children&&(e.w2ui.children=[])}),this.records.splice.apply(this.records,[i+1,0].concat(r)),-1!==this.total&&(this.total+=r.length),("object"!=typeof this.url?this.url:this.url.get)||(this.localSort(!0,!0),0 - -
      - - - `),query(this.box).find("#grid_"+this.name+"_frec_"+l).after(` - ${this.show.lineNumbers?'':""} - -
      - - `),!0===(n=this.trigger("expand",{target:this.name,recid:e,box_id:"grid_"+this.name+"_rec_"+e+"_expanded",fbox_id:"grid_"+this.name+"_frec_"+l+"_expanded"})).isCancelled)return query(this.box).find("#grid_"+this.name+"_rec_"+l+"_expanded_row").remove(),query(this.box).find("#grid_"+this.name+"_frec_"+l+"_expanded_row").remove(),!1;i=query(this.box).find("#grid_"+this.name+"_rec_"+e+"_expanded"),r=query(this.box).find("#grid_"+this.name+"_frec_"+e+"_expanded"),t=i.find(":scope div:first-child")[0]?.clientHeight??50;i[0].clientHeight{query(this.box).find("#grid_"+this.name+"_rec_"+e+"_expanded_row").remove(),query(this.box).find("#grid_"+this.name+"_frec_"+e+"_expanded_row").remove(),l.w2ui.expanded=!1,n.finish(),this.resizeRecords()},300)}return!0}sort(i,e,s){var t=this.trigger("sort",{target:this.name,field:i,direction:e,multiField:s});if(!0!==t.isCancelled){if(null!=i){let t=this.sortData.length;for(let e=0;ei&&(i=s[e].column),-1==r.indexOf(s[e].index)&&r.push(s[e].index);r.sort((e,t)=>e-t);for(let e=0;e div.w2ui-grid-box").css("width",query(this.box)[0].clientWidth+"px").css("height",query(this.box)[0].clientHeight+"px");var t=this.trigger("resize",{target:this.name});if(!0!==t.isCancelled)return this.resizeBoxes(),this.resizeRecords(),t.finish(),Date.now()-e}}update({cells:t,fullCellRefresh:i,ignoreColumns:e}={}){var s=Date.now();let u=this;if(null==this.box)return 0;if(Array.isArray(t))for(let e=0;e!!e);e.classList.forEach(e=>{t.includes(e)||i.push(e)}),e.classList.remove(...i),e.classList.add(...o)}}if(u.columns[t].style&&u.columns[t].style!=e.style.cssText&&(e.style.cssText=u.columns[t].style??""),null!=s.w2ui.class){if("string"==typeof s.w2ui.class){let t=["w2ui-odd","w2ui-even","w2ui-record"],i=[];n=s.w2ui.class.split(" ").filter(e=>!!e);l&&r&&(l.classList.forEach(e=>{t.includes(e)||i.push(e)}),l.classList.remove(...i),l.classList.add(...n),r.classList.remove(...i),r.classList.add(...n))}if(w2utils.isPlainObject(s.w2ui.class)&&"string"==typeof s.w2ui.class[a.field]){let t=["w2ui-grid-data"],i=[];h=s.w2ui.class[a.field].split(" ").filter(e=>!!e);e.classList.forEach(e=>{t.includes(e)||i.push(e)}),e.classList.remove(...i),e.classList.add(...h)}}null!=s.w2ui.style&&(l&&r&&"string"==typeof s.w2ui.style&&l.style.cssText!==s.w2ui.style&&(l.style.cssText="height: "+u.recordHeight+"px;"+s.w2ui.style,l.setAttribute("custom_style",s.w2ui.style),r.style.cssText="height: "+u.recordHeight+"px;"+s.w2ui.style,r.setAttribute("custom_style",s.w2ui.style)),w2utils.isPlainObject(s.w2ui.style)&&"string"==typeof s.w2ui.style[a.field]&&e.style.cssText!==s.w2ui.style[a.field]&&(e.style.cssText=s.w2ui.style[a.field]))}}}}refreshCell(e,t){var i=this.get(e,!0),t=this.getColumn(t,!0),e=!this.records[i]||this.records[i].recid!=e,s=query(this.box).find(`${e?".w2ui-grid-summary ":""}#grid_${this.name}_data_${i}_`+t);return 0!=s.length&&(s.replace(this.getCellHTML(i,t,e)),!0)}refreshRow(t,i=null){let s=query(this.box).find("#grid_"+this.name+"_frec_"+w2utils.escapeId(t)),l=query(this.box).find("#grid_"+this.name+"_rec_"+w2utils.escapeId(t));if(0{var t=[];for(let e=0;e{var t=query(this.box).find('td[col="'+e.col+'"]:not(.w2ui-head)');w2utils.marker(t,e.search)})},50),this.updateToolbar(),t.finish(),this.resize(),this.addRange("selection"),setTimeout(()=>{this.resize(),this.scroll()},1),this.reorderColumns&&!this.last.columnDrag?this.last.columnDrag=this.initColumnDrag():!this.reorderColumns&&this.last.columnDrag&&this.last.columnDrag.remove(),Date.now()-e}}}refreshSearch(){if(this.multiSearch&&0`);let 
r=` - -
      `;this.searchData.forEach((i,e)=>{var t=this.getSearch(i.field,!0),s=this.searches[t];let l;if(l=Array.isArray(i.value)?`${i.value.length}`:": "+i.value,s&&"date"==s.type)if("between"==i.operator){let e=i.value[0],t=i.value[1];Number(e)===e&&(e=w2utils.formatDate(e)),Number(t)===t&&(t=w2utils.formatDate(t)),l=`: ${e} - `+t}else{let e=i.value,t=(Number(e)==e&&(e=w2utils.formatDate(e)),i.operator);"more:"==(t="less"==(t="more"==t?"since":t)?"before":t).substr(0,5)&&(t="since"),l=`: ${t} `+e}r+=` - ${s?s.label:""} - ${l} - - `}),r+=` - ${this.show.searchSave?`
      - - `:""} - - `,query(this.box).find(`#grid_${this.name}_searches`).html(r),query(this.box).find(`#grid_${this.name}_search_logic`).html(w2utils.lang("AND"==this.last.logic?"All":"Any"))}else query(this.box).find(".w2ui-grid-toolbar").css("height",this.last.toolbar_height+"px").find(".w2ui-grid-searches").remove();this.searchSelected?(query(this.box).find(`#grid_${this.name}_search_all`).val(" ").prop("readOnly",!0),query(this.box).find(`#grid_${this.name}_search_name`).show().find(".name-text").html(this.searchSelected.text)):(query(this.box).find(`#grid_${this.name}_search_all`).prop("readOnly",!1),query(this.box).find(`#grid_${this.name}_search_name`).hide().find(".name-text").html("")),w2utils.bindEvents(query(this.box).find(`#grid_${this.name}_searches .w2ui-action, #grid_${this.name}_searches button`),this)}refreshBody(){this.scroll();var e=this.getRecordsHTML(),t=this.getColumnsHTML(),e='
      '+e[0]+'
      '+e[1]+'
      '+t[0]+'
      '+t[1]+"
      "+``;let l=query(this.box).find(`#grid_${this.name}_body`,this.box).html(e);t=query(this.box).find(`#grid_${this.name}_records`,this.box),e=query(this.box).find(`#grid_${this.name}_frecords`,this.box);"row"==this.selectType&&(t.on("mouseover mouseout",{delegate:"tr"},e=>{var t=query(e.delegate).attr("recid");query(this.box).find(`#grid_${this.name}_frec_`+w2utils.escapeId(t)).toggleClass("w2ui-record-hover","mouseover"==e.type)}),e.on("mouseover mouseout",{delegate:"tr"},e=>{var t=query(e.delegate).attr("recid");query(this.box).find(`#grid_${this.name}_rec_`+w2utils.escapeId(t)).toggleClass("w2ui-record-hover","mouseover"==e.type)})),w2utils.isIOS?t.append(e).on("click",{delegate:"tr"},e=>{var t=query(e.delegate).attr("recid");this.dblClick(t,e)}):t.add(e).on("click",{delegate:"tr"},e=>{var t=query(e.delegate).attr("recid");"-none-"!=t&&this.click(t,e)}).on("contextmenu",{delegate:"tr"},e=>{var t=query(e.delegate).attr("recid");this.showContextMenu(t,null,e)}).on("mouseover",{delegate:"tr"},e=>{this.last.rec_out=!1;let t=query(e.delegate).attr("index"),i=query(e.delegate).attr("recid");t!==this.last.rec_over&&(this.last.rec_over=t,setTimeout(()=>{delete this.last.rec_out,this.trigger("mouseEnter",{target:this.name,originalEvent:e,index:t,recid:i}).finish()}))}).on("mouseout",{delegate:"tr"},t=>{let i=query(t.delegate).attr("index"),s=query(t.delegate).attr("recid");this.last.rec_out=!0,setTimeout(()=>{let e=()=>{this.trigger("mouseLeave",{target:this.name,originalEvent:t,index:i,recid:s}).finish()};i!==this.last.rec_over&&e(),setTimeout(()=>{this.last.rec_out&&(delete this.last.rec_out,delete this.last.rec_over,e())})})}),l.data("scroll",{lastDelta:0,lastTime:0}).find(".w2ui-grid-frecords").on("mousewheel DOMMouseScroll ",e=>{e.preventDefault();var t=l.data("scroll"),i=l.find(".w2ui-grid-records"),e=null!=typeof e.wheelDelta?-e.wheelDelta:e.detail||e.deltaY,s=i.prop("scrollTop");t.lastDelta+=e,e=Math.round(t.lastDelta),l.data("scroll",t),i.get(0).scroll({top:s+e,behavior:"smooth"})}),t.off(".body-global").on("scroll.body-global",{delegate:".w2ui-grid-records"},e=>{this.scroll(e)}),query(this.box).find(".w2ui-grid-body").off(".body-global").on("click.body-global dblclick.body-global contextmenu.body-global",{delegate:"td.w2ui-head"},e=>{var t=query(e.delegate).attr("col"),i=this.columns[t]??{field:t};switch(e.type){case"click":this.columnClick(i.field,e);break;case"dblclick":this.columnDblClick(i.field,e);break;case"contextmenu":this.show.columnMenu&&(w2menu.show({type:"check",anchor:document.body,originalEvent:e,items:this.initColumnOnOff()}).then(()=>{query("#w2overlay-context-menu .w2ui-grid-skip").off(".w2ui-grid").on("click.w2ui-grid",e=>{e.stopPropagation()}).on("keypress",e=>{13==e.keyCode&&(this.skip(e.target.value),this.toolbar.click("w2ui-column-on-off"))})}).select(e=>{var t=e.detail.item.id;["w2ui-stateSave","w2ui-stateReset"].includes(t)?this[t.substring(5)]():"w2ui-skip"!=t&&this.columnOnOff(e,e.detail.item.id),clearTimeout(this.last.kbd_timer)}),clearTimeout(this.last.kbd_timer)),e.preventDefault()}}).on("mouseover.body-global",{delegate:".w2ui-col-header"},e=>{let t=query(e.delegate).parent().attr("col");this.columnTooltipShow(t,e),query(e.delegate).off(".tooltip").on("mouseleave.tooltip",()=>{this.columnTooltipHide(t,e)})}).on("click.body-global",{delegate:"input.w2ui-select-all"},e=>{e.delegate.checked?this.selectAll():this.selectNone(),e.stopPropagation(),clearTimeout(this.last.kbd_timer)}).on("click.body-global",{delegate:".w2ui-show-children, 
.w2ui-col-expand"},e=>{e.stopPropagation(),this.toggle(query(e.target).parents("tr").attr("recid"))}).on("click.body-global mouseover.body-global",{delegate:".w2ui-info"},e=>{var t=query(e.delegate).closest("td"),i=t.parent(),s=this.columns[t.attr("col")],l=i.parents(".w2ui-grid-body").hasClass("w2ui-grid-summary");["mouseenter","mouseover"].includes(s.info?.showOn?.toLowerCase())&&"mouseover"==e.type?this.showBubble(i.attr("index"),t.attr("col"),l).then(()=>{query(e.delegate).off(".tooltip").on("mouseleave.tooltip",()=>{w2tooltip.hide(this.name+"-bubble")})}):"click"==e.type&&(w2tooltip.hide(this.name+"-bubble"),this.showBubble(i.attr("index"),t.attr("col"),l))}).on("mouseover.body-global",{delegate:".w2ui-clipboard-copy"},l=>{if(!l.delegate._tooltipShow){let t=query(l.delegate).parent(),i=t.parent();var e=this.columns[t.attr("col")];let s=i.parents(".w2ui-grid-body").hasClass("w2ui-grid-summary");w2tooltip.show({name:this.name+"-bubble",anchor:l.delegate,html:w2utils.lang("string"==typeof e.clipboardCopy?e.clipboardCopy:"Copy to clipboard"),position:"top|bottom",offsetY:-2}).hide(e=>{l.delegate._tooltipShow=!1,query(l.delegate).off(".tooltip")}),query(l.delegate).off(".tooltip").on("mouseleave.tooltip",e=>{w2tooltip.hide(this.name+"-bubble")}).on("click.tooltip",e=>{e.stopPropagation(),w2tooltip.update(this.name+"-bubble",w2utils.lang("Copied")),this.clipboardCopy(i.attr("index"),t.attr("col"),s)}),l.delegate._tooltipShow=!0}}).on("click.body-global",{delegate:".w2ui-editable-checkbox"},e=>{var t=query(e.delegate).data();this.editChange.call(this,e.delegate,t.changeind,t.colind,e),this.updateToolbar()}),0===this.records.length&&this.msgEmpty?query(this.box).find(`#grid_${this.name}_body`).append(`
      ${this.msgEmpty}
      `):0=this.searches.length?(this.last.field="",this.last.label=""):(this.last.field=this.searches[e].field,this.last.label=this.searches[e].label)}if(query(this.box).attr("name",this.name).addClass("w2ui-reset w2ui-grid w2ui-inactive").html('
      "),"row"!=this.selectType&&query(this.box).addClass("w2ui-ss"),0{this.searchInitInput(this.last.field,1==e.length?e[0].value:null)},1)}query(this.box).find(`#grid_${this.name}_footer`).html(this.getFooterHTML()),this.last.state||(this.last.state=this.stateSave(!0)),this.stateRestore(),e&&(this.clear(),this.refresh());let t=!1;for(let e=0;e{this.searchReset()},1)):this.reload(),query(this.box).find(`#grid_${this.name}_focus`).on("focus",e=>{clearTimeout(this.last.kbd_timer),this.hasFocus||this.focus()}).on("blur",e=>{clearTimeout(this.last.kbd_timer),this.last.kbd_timer=setTimeout(()=>{this.hasFocus&&this.blur()},100)}).on("paste",i=>{var s=i.clipboardData||null;if(s){let e=s.items,t=[];for(var l in e=2==e.length&&2==(e=2==e.length&&"file"==e[1].kind?[e[1]]:e).length&&"text/plain"==e[0].type&&"text/html"==e[1].type?[e[1]]:e){l=e[l];if("file"===l.kind){var r=l.getAsFile();t.push({kind:"file",data:r})}else if("string"===l.kind&&("text/plain"===l.type||"text/html"===l.type)){i.preventDefault();let e=s.getData("text/plain");-1!=e.indexOf("\r")&&-1==e.indexOf("\n")&&(e=e.replace(/\r/g,"\n")),t.push({kind:"text/html"==l.type?"html":"text",data:e})}}1===t.length&&"file"!=t[0].kind&&(t=t[0].data),w2ui[this.name].paste(t,i),i.preventDefault()}}).on("keydown",function(e){w2ui[p.name].keydown.call(w2ui[p.name],e)});let c;return query(this.box).off("mousedown.mouseStart").on("mousedown.mouseStart",function(l){if(1==l.which&&("text"==p.last.userSelect&&(p.last.userSelect="",query(p.box).find(".w2ui-grid-body").css("user-select","none")),!("row"==p.selectType&&(query(l.target).parents().hasClass("w2ui-head")||query(l.target).hasClass("w2ui-head"))||p.last.move&&"expand"==p.last.move.type))){if(l.altKey)query(p.box).find(".w2ui-grid-body").css("user-select","text"),p.selectNone(),p.last.move={type:"text-select"},p.last.userSelect="text";else{let e=l.target;var r={x:l.offsetX-10,y:l.offsetY-10};let t=!1;for(;e&&(!e.classList||!e.classList.contains("w2ui-grid"));)e.tagName&&"TD"==e.tagName.toUpperCase()&&(t=!0),e.tagName&&"TR"!=e.tagName.toUpperCase()&&1==t&&(r.x+=e.offsetLeft,r.y+=e.offsetTop),e=e.parentNode;p.last.move={x:l.screenX,y:l.screenY,divX:0,divY:0,focusX:r.x,focusY:r.y,recid:query(l.target).parents("tr").attr("recid"),column:parseInt(("TD"==l.target.tagName.toUpperCase()?query(l.target):query(l.target).parents("td")).attr("col")),type:"select",ghost:!1,start:!0},null==p.last.move.recid&&(p.last.move.type="select-column");let i=l.target,s=query(p.box).find("#grid_"+p.name+"_focus");if(p.last.move){let e=p.last.move.focusX,t=p.last.move.focusY;var n=query(i).parents("table").parent();(n.hasClass("w2ui-grid-records")||n.hasClass("w2ui-grid-frecords")||n.hasClass("w2ui-grid-columns")||n.hasClass("w2ui-grid-fcolumns")||n.hasClass("w2ui-grid-summary"))&&(e=p.last.move.focusX-query(p.box).find("#grid_"+p.name+"_records").prop("scrollLeft"),t=p.last.move.focusY-query(p.box).find("#grid_"+p.name+"_records").prop("scrollTop")),(query(i).hasClass("w2ui-grid-footer")||0{p.last.inEditMode||(["INPUT","TEXTAREA","SELECT"].includes(i.tagName)?i.focus():s.get(0)!==document.active&&s.get(0).focus({preventScroll:!0}))},50),p.multiSelect||p.reorderRows||"drag"!=p.last.move.type||delete p.last.move}if(1==p.reorderRows){let e=l.target;var 
t,i,s,a;"TD"!=e.tagName.toUpperCase()&&(e=query(e).parents("td")[0]),query(e).hasClass("w2ui-col-number")||query(e).hasClass("w2ui-col-order")?(p.selectNone(),p.last.move.reorder=!0,n=query(p.box).find(".w2ui-even.w2ui-empty-record").css("background-color"),t=query(p.box).find(".w2ui-odd.w2ui-empty-record").css("background-color"),query(p.box).find(".w2ui-even td").filter(":not(.w2ui-col-number)").css("background-color",n),query(p.box).find(".w2ui-odd td").filter(":not(.w2ui-col-number)").css("background-color",t),t=p.last.move,i=query(p.box).find(".w2ui-grid-records"),t.ghost||(s=query(p.box).find(`#grid_${p.name}_rec_`+t.recid),a=s.parents("table").find("tr:first-child").get(0).cloneNode(!0),t.offsetY=l.offsetY,t.from=t.recid,t.pos={top:s.get(0).offsetTop-1,left:s.get(0).offsetLeft},t.ghost=query(s.get(0).cloneNode(!0)),t.ghost.removeAttr("id"),t.ghost.find("td").css({"border-top":"1px solid silver","border-bottom":"1px solid silver"}),s.find("td").remove(),s.append(`
      `),i.append('
      '),i.append('
      '),query(p.box).find("#grid_"+p.name+"_ghost").append(a).append(t.ghost)),query(p.box).find("#grid_"+p.name+"_ghost").css({top:t.pos.top+"px",left:t.pos.left+"px"})):p.last.move.reorder=!1}query(document).on("mousemove.w2ui-"+p.name,o).on("mouseup.w2ui-"+p.name,h),l.stopPropagation()}}),this.updateToolbar(),s.finish(),this.last.observeResize=new ResizeObserver(()=>{this.resize()}),this.last.observeResize.observe(this.box),Date.now()-i;function o(t){if(t.target.tagName){var r=p.last.move;if(r&&-1!=["select","select-column"].indexOf(r.type)&&(r.divX=t.screenX-r.x,r.divY=t.screenY-r.y,!(Math.abs(r.divX)<=1&&Math.abs(r.divY)<=1)))if(p.last.cancelClick=!0,1==p.reorderRows&&p.last.move.reorder){let e=query(t.target).parents("tr").attr("recid");(e="-none-"==e?"bottom":e)!=r.from&&(a=query(p.box).find("#grid_"+p.name+"_rec_"+e),query(p.box).find(".insert-before"),a.addClass("insert-before"),r.lastY=t.screenY,r.to=e,a={top:a.get(0)?.offsetTop,left:a.get(0)?.offsetLeft},query(p.box).find("#grid_"+p.name+"_ghost_line").css({top:a.top+"px",left:r.pos.left+"px","border-top":"2px solid #769EFC"})),void query(p.box).find("#grid_"+p.name+"_ghost").css({top:r.pos.top+r.divY+"px",left:r.pos.left+"px"})}else{r.start&&r.recid&&(p.selectNone(),r.start=!1);var n=[],a=("TR"==t.target.tagName.toUpperCase()?query(t.target):query(t.target).parents("tr")).attr("recid");if(null==a){if("row"!=p.selectType&&(!p.last.move||"select"!=p.last.move.type)){var o=parseInt(query(t.target).parents("td").attr("col"));if(isNaN(o))p.removeRange("column-selection"),query(p.box).find(".w2ui-grid-columns .w2ui-col-header, .w2ui-grid-fcolumns .w2ui-col-header").removeClass("w2ui-col-selected"),query(p.box).find(".w2ui-col-number").removeClass("w2ui-row-selected"),delete r.colRange;else{let e=o+"-"+o;r.columno?o+"-"+r.column:e).split("-");for(let e=parseInt(s[0]);e<=parseInt(s[1]);e++)i.push(e);if(r.colRange!=e&&!0!==(c=p.trigger("columnSelect",{target:p.name,columns:i})).isCancelled){null==r.colRange&&p.selectNone();var l=e.split("-");query(p.box).find(".w2ui-grid-columns .w2ui-col-header, .w2ui-grid-fcolumns .w2ui-col-header").removeClass("w2ui-col-selected");for(let e=parseInt(l[0]);e<=parseInt(l[1]);e++)query(p.box).find("#grid_"+p.name+"_column_"+e+" .w2ui-col-header").addClass("w2ui-col-selected");query(p.box).find(".w2ui-col-number").not(".w2ui-head").addClass("w2ui-row-selected"),r.colRange=e,p.removeRange("column-selection"),p.addRange({name:"column-selection",range:[{recid:p.records[0].recid,column:l[0]},{recid:p.records[p.records.length-1].recid,column:l[1]}],style:"background-color: rgba(90, 145, 234, 0.1)"})}}}}else{let l=p.get(r.recid,!0);if(!(null==l||p.records[l]&&p.records[l].recid!=r.recid)){let e=p.get(a,!0);if(null!=e){let i=parseInt(r.column),s=parseInt(("TD"==t.target.tagName.toUpperCase()?query(t.target):query(t.target).parents("td")).attr("col"));isNaN(i)&&isNaN(s)&&(i=0,s=p.columns.length-1),l>e&&(o=l,l=e,e=o);var h,a="ind1:"+l+",ind2;"+e+",col1:"+i+",col2:"+s;if(r.range!=a){r.range=a;for(let t=l;t<=e;t++)if(!(0s&&(h=i,i=s,s=h);for(let e=i;e<=s;e++)p.columns[e].hidden||n.push({recid:p.records[t].recid,column:parseInt(e)})}else n.push(p.records[t].recid);if("row"!=p.selectType){var d=p.getSelection();let e=[];for(let i=0;i{delete p.last.cancelClick},1),!query(t.target).parents().hasClass(".w2ui-head")&&!query(t.target).hasClass(".w2ui-head")){if(i&&-1!=["select","select-column"].indexOf(i.type)){if(null!=i.colRange&&!0!==c.isCancelled){var s=i.colRange.split("-"),l=[];for(let 
e=0;ee?p.records.splice(e,0,i):p.records.splice(e-1,0,i)),a(),t.finish()}else a()}delete p.last.move,query(document).off(".w2ui-"+p.name)}}function a(){query(p.box).find(`#grid_${p.name}_ghost`).remove(),query(p.box).find(`#grid_${p.name}_ghost_line`).remove(),p.refresh(),delete p.last.move}}}destroy(){var e=this.trigger("destroy",{target:this.name});!0!==e.isCancelled&&(query(this.box).off(),"object"==typeof this.toolbar&&this.toolbar.destroy&&this.toolbar.destroy(),0`+w2utils.lang("records"),i.push({id:"w2ui-skip",text:e,group:!1,icon:"w2ui-icon-empty"})),this.show.saveRestoreState&&i.push({id:"w2ui-stateSave",text:w2utils.lang("Save Grid State"),icon:"w2ui-icon-empty",group:!1},{id:"w2ui-stateReset",text:w2utils.lang("Restore Default State"),icon:"w2ui-icon-empty",group:!1});let t=[];return i.forEach(e=>{e.text=w2utils.lang(e.text),e.checked&&t.push(e.id)}),this.toolbar.set("w2ui-column-on-off",{selected:t,items:i}),i}initColumnDrag(e){if(this.columnGroups&&this.columnGroups.length)throw"Draggable columns are not currently supported with column groups.";let n=this,a={targetPos:null,pressed:!1,columnHead:null};function o(e){var t,i,s,l;a.pressed&&(t=e.pageX,i=e.pageY,e=e,0!=query(e.target).closest("td").length&&(l=query(n.box).find(".w2ui-grid-body").get(0).getBoundingClientRect(),s=query(e.target).closest("td").get(0).getBoundingClientRect(),query(n.box).find(".w2ui-intersection-marker").show().css({left:s.left-l.left+"px"}),a.targetPos=parseInt(query(e.target).closest("td").attr("col"))),s=t,l=i,query(a.ghost).css({left:s-10+"px",top:l-10+"px"}).show())}function h(e){if(a.pressed){a.pressed=!1;var t,i,s=query(n.box).find(".w2ui-grid-ghost"),e=n.trigger("columnDragEnd",{originalEvent:e,target:a.columnHead[0]});if(!0===e.isCancelled)return!1;t=n.columns[a.originalPos],i=n.columns,a.originalPos!=a.targetPos&&null!=a.targetPos&&(i.splice(a.targetPos,0,w2utils.clone(t)),i.splice(i.indexOf(t),1)),query(n.box).find(".w2ui-intersection-marker").hide(),query(a.ghost).remove(),s.remove(),query(document).off(".colDrag"),a={},n.refresh(),e.finish({targetColumn:NaN})}}return query(n.box).off(".colDrag").on("mousedown.colDrag",function(i){if(!a.pressed&&0!==a.numberPreColumnsPresent&&0===i.button){a.pressed=!0;var s,e,l=["w2ui-col-number","w2ui-col-expand","w2ui-col-select"].concat(["w2ui-head-last"]);if(query(i.target).parents().hasClass("w2ui-head")){for(let e=0,t=l.length;e${t}`)[0],query(document.body).append(a.ghost),query(a.ghost).css({display:"none",left:i.pageX,top:i.pageY,opacity:1,margin:"3px 0 0 20px",padding:"3px","background-color":"white",position:"fixed","z-index":999999}).addClass(".w2ui-grid-ghost"),a.offsets=[];for(let e=0,t=s.length;e - ${this.buttons.search.html} -
      - - - x -
      - -
      - -
      - `,this.toolbar.items.push({id:"w2ui-search",type:"html",html:t,onRefresh:async e=>{await e.complete;e=query(this.box).find(`#grid_${this.name}_search_all`);w2utils.bindEvents(query(this.box).find(`#grid_${this.name}_search_all, .w2ui-action`),this),e.on("change",e=>{this.liveSearch||(this.search(this.last.field,e.target.value),this.searchSuggest(!0,!0,this))}).on("blur",()=>{this.last.liveText=""}).on("keyup",e=>{var t=e.target.value;this.liveSearch&&this.last.liveText!=t&&(this.last.liveText=t,this.search(this.last.field,t)),40==e.keyCode&&this.searchSuggest(!0)})}})),Array.isArray(e)&&(t=e.map(e=>e.id),this.show.toolbarAdd&&!t.includes(this.buttons.add.id)&&this.toolbar.items.push(w2utils.extend({},this.buttons.add)),this.show.toolbarEdit&&!t.includes(this.buttons.edit.id)&&this.toolbar.items.push(w2utils.extend({},this.buttons.edit)),this.show.toolbarDelete&&!t.includes(this.buttons.delete.id)&&this.toolbar.items.push(w2utils.extend({},this.buttons.delete)),this.show.toolbarSave&&!t.includes(this.buttons.save.id)&&((this.show.toolbarAdd||this.show.toolbarDelete||this.show.toolbarEdit)&&this.toolbar.items.push({type:"break",id:"w2ui-break2"}),this.toolbar.items.push(w2utils.extend({},this.buttons.save)))),this.toolbar.items.push(...e),this.toolbar.on("click",e=>{var i=this.trigger("toolbar",{target:e.target,originalEvent:e});if(!0!==i.isCancelled){let t;switch(e.detail.item.id){case"w2ui-reload":if(!0===(t=this.trigger("reload",{target:this.name})).isCancelled)return!1;this.reload(),t.finish();break;case"w2ui-column-on-off":e.detail.subItem?(s=e.detail.subItem.id,["w2ui-stateSave","w2ui-stateReset"].includes(s)?this[s.substring(5)]():"w2ui-skip"!=s&&this.columnOnOff(e,e.detail.subItem.id)):(this.initColumnOnOff(),setTimeout(()=>{query(`#w2overlay-${this.name}_toolbar-drop .w2ui-grid-skip`).off(".w2ui-grid").on("click.w2ui-grid",e=>{e.stopPropagation()}).on("keypress",e=>{13==e.keyCode&&(this.skip(e.target.value),this.toolbar.click("w2ui-column-on-off"))})},100));break;case"w2ui-add":if(!0===(t=this.trigger("add",{target:this.name,recid:null})).isCancelled)return!1;t.finish();break;case"w2ui-edit":{var s=this.getSelection();let e=null;if(1==s.length&&(e=s[0]),!0===(t=this.trigger("edit",{target:this.name,recid:e})).isCancelled)return!1;t.finish();break}case"w2ui-delete":this.delete();break;case"w2ui-save":this.save()}i.finish()}}),this.toolbar.on("refresh",e=>{if("w2ui-search"==e.target){let e=this.searchData;setTimeout(()=>{this.searchInitInput(this.last.field,1==e.length?e[0].value:null)},1)}}))}initResize(){let r=this;query(this.box).find(".w2ui-resizer").off(".grid-col-resize").on("click.grid-col-resize",function(e){e.stopPropagation?e.stopPropagation():e.cancelBubble=!0,e.preventDefault&&e.preventDefault()}).on("mousedown.grid-col-resize",function(e){e=e||window.event,r.last.colResizing=!0,r.last.tmp={x:e.screenX,y:e.screenY,gx:e.screenX,gy:e.screenY,col:parseInt(query(this).attr("name"))},r.last.tmp.tds=query(r.box).find("#grid_"+r.name+'_body table tr:first-child td[col="'+r.last.tmp.col+'"]'),e.stopPropagation?e.stopPropagation():e.cancelBubble=!0,e.preventDefault&&e.preventDefault();for(let e=0;e{r.resizeRecords(),r.scroll()},100),r.last.tmp.tds.css({width:t}),r.last.tmp.x=e.screenX,r.last.tmp.y=e.screenY))}).on("mouseup.grid-col-resize",function(e){query(document).off(".grid-col-resize"),r.resizeRecords(),r.scroll(),i.finish({originalEvent:e}),setTimeout(()=>{r.last.colResizing=!1},1)})}).on("dblclick.grid-col-resize",function(e){let 
t=parseInt(query(this).attr("name")),i=r.columns[t],s=0;if(!1===i.autoResize)return!0;e.stopPropagation?e.stopPropagation():e.cancelBubble=!0,e.preventDefault&&e.preventDefault(),query(r.box).find('.w2ui-grid-records td[col="'+t+'"] > div',r.box).each(()=>{var e=this.offsetWidth-this.scrollWidth;e{var t=query(e).get(0).parentNode;query(e).css({height:t.clientHeight+"px","margin-left":t.clientWidth-3+"px"})})}resizeBoxes(){var e=query(this.box).find(`#grid_${this.name}_header`),t=query(this.box).find(`#grid_${this.name}_toolbar`),i=query(this.box).find(`#grid_${this.name}_fsummary`),s=query(this.box).find(`#grid_${this.name}_summary`),l=query(this.box).find(`#grid_${this.name}_footer`),r=query(this.box).find(`#grid_${this.name}_body`);this.show.header&&e.css({top:"0px",left:"0px",right:"0px"}),this.show.toolbar&&t.css({top:0+(this.show.header?w2utils.getSize(e,"height"):0)+"px",left:"0px",right:"0px"}),0 div.w2ui-grid-box"),r=query(this.box).find(`#grid_${this.name}_header`),n=query(this.box).find(`#grid_${this.name}_toolbar`),a=query(this.box).find(`#grid_${this.name}_summary`),o=query(this.box).find(`#grid_${this.name}_fsummary`),h=query(this.box).find(`#grid_${this.name}_footer`),d=query(this.box).find(`#grid_${this.name}_body`),u=query(this.box).find(`#grid_${this.name}_columns`),c=query(this.box).find(`#grid_${this.name}_fcolumns`),p=query(this.box).find(`#grid_${this.name}_records`),f=query(this.box).find(`#grid_${this.name}_frecords`),m=query(this.box).find(`#grid_${this.name}_scroll1`);let g=8*String(this.total).length+10,y=(g<34&&(g=34),null!=this.lineNumberWidth&&(g=this.lineNumberWidth),!1),w=!1,b=0;for(let e=0;e table")[0]?.clientHeight??0)+(y?w2utils.scrollBarSize():0)&&(w=!0),this.fixedBody?(e=l[0]?.clientHeight-(this.show.header?w2utils.getSize(r,"height"):0)-(this.show.toolbar?w2utils.getSize(n,"height"):0)-("none"!=a.css("display")?w2utils.getSize(a,"height"):0)-(this.show.footer?w2utils.getSize(h,"height"):0),d.css("height",e+"px")):(r=(e=w2utils.getSize(u,"height")+w2utils.getSize(query(this.box).find("#grid_"+this.name+"_records table"),"height")+(y?w2utils.scrollBarSize():0))+(this.show.header?w2utils.getSize(r,"height"):0)+(this.show.toolbar?w2utils.getSize(n,"height"):0)+("none"!=a.css("display")?w2utils.getSize(a,"height"):0)+(this.show.footer?w2utils.getSize(h,"height"):0),l.css("height",r+"px"),d.css("height",e+"px"),s.css("height",w2utils.getSize(l,"height")+"px"));let v=this.records.length;n="object"!=typeof this.url?this.url:this.url.get;if(0==this.searchData.length||n||(v=this.last.searchIds.length),this.fixedBody||(w=!1),y||w?(u.find(":scope > table > tbody > tr:nth-child(1) td.w2ui-head-last").css("width",w2utils.scrollBarSize()+"px").show(),p.css({top:(0 table > tbody > tr:nth-child(1) td.w2ui-head-last").hide(),p.css({top:(0=this.recordHeight&&(e-=this.recordHeight,t++),this.fixedBody){for(let e=v;e',l+='',i.show.lineNumbers&&(s+=''),i.show.selectColumn&&(s+=''),i.show.expandColumn&&(s+=''),l+='',i.show.orderColumn&&(l+='');for(let e=0;ei.last.colEnd)&&!n.frozen||(r='',n.frozen?s+=r:l+=r)}s+=' ',l+=' ',query(i.box).find("#grid_"+i.name+"_frecords > table").append(s),query(i.box).find("#grid_"+i.name+"_records > table").append(l)}let _,q;if(0_&&!0!==C.hidden&&(C.hidden=!0,i=!0),C.gridMinWidth<_&&!0===C.hidden&&(C.hidden=!1,i=!0))}if(!0===i)return void this.refresh();for(let e=0;eparseInt(E.max)&&(E.sizeCalculated=E.max+"px"),$+=parseInt(E.sizeCalculated))}let z=parseInt(_)-parseInt($);if(0 table > tbody > tr:nth-child(1) 
td.w2ui-head-last").css("width",w2utils.scrollBarSize()+"px").show();let A=1;this.show.lineNumbers&&(A+=g),this.show.selectColumn&&(A+=26),this.show.expandColumn&&(A+=26);for(let e=0;e table > tbody > tr:nth-child(1) td").add(c.find(":scope > table > tbody > tr:nth-child(1) td")).each(e=>{query(e).hasClass("w2ui-col-number")&&query(e).css("width",g+"px");var t=query(e).attr("col");if(null!=t){if("start"==t){let t=0;for(let e=0;e table > tbody > tr").length&&u.find(":scope > table > tbody > tr:nth-child(1) td").add(c.find(":scope > table > tbody > tr:nth-child(1) td")).html("").css({height:"0",border:"0",padding:"0",margin:"0"}),p.find(":scope > table > tbody > tr:nth-child(1) td").add(f.find(":scope > table > tbody > tr:nth-child(1) td")).each(e=>{query(e).hasClass("w2ui-col-number")&&query(e).css("width",g+"px");var t=query(e).attr("col");if(null!=t){if("start"==t){let t=0;for(let e=0;e table > tbody > tr:nth-child(1) td").add(o.find(":scope > table > tbody > tr:nth-child(1) td")).each(e=>{query(e).hasClass("w2ui-col-number")&&query(e).css("width",g+"px");var t=query(e).attr("col");if(null!=t){if("start"==t){let t=0;for(let e=0;e - ${w2utils.lang("Advanced Search")} - - - - - - `;for(let t=0;t",s),s.label=s.caption);var l=``;i+=` - - - "}}return i+=` - - -
      ${w2utils.lang(s.label)||""}${l}`;let e;switch(s.type){case"text":case"alphanumeric":case"hex":case"color":case"list":case"combo":case"enum":e="width: 250px;",-1!=["hex","color"].indexOf(s.type)&&(e="width: 90px;"),i+=``;break;case"int":case"float":case"money":case"currency":case"percent":case"date":case"time":case"datetime":e="width: 90px;","datetime"==s.type&&(e="width: 140px;"),i+=` - `;break;case"select":i+=``}i+=s.text+"
      - - - - -
      `}getOperators(e,t){let i=this.operators[this.operatorsMap[e]]||[],s=(null!=t&&Array.isArray(t)&&(i=t),"");return i.forEach(e=>{let t=e,i=e;Array.isArray(e)?(t=e[1],i=e[0]):w2utils.isPlainObject(e)&&(t=e.text,i=e.oper),null==t&&(t=e),s+=` -`}),s}initOperator(e){let i;var t=this.searches[e],s=this.getSearchData(t.field),l=query(`#w2overlay-${this.name}-search-overlay`),r=l.find(`#grid_${this.name}_range_`+e);let n=l.find(`#grid_${this.name}_field_`+e),a=l.find(`#grid_${this.name}_field2_`+e);var o=l.find(`#grid_${this.name}_operator_`+e).val();switch(n.show(),r.hide(),o){case"between":r.show();break;case"null":case"not null":n.hide(),n.val(o),n.trigger("change")}switch(t.type){case"text":case"alphanumeric":var h=n[0]._w2field;h&&h.reset();break;case"int":case"float":case"hex":case"color":case"money":case"currency":case"percent":case"date":case"time":case"datetime":n[0]._w2field||(new w2field(t.type,{el:n[0],...t.options}),new w2field(t.type,{el:a[0],...t.options}),setTimeout(()=>{n.trigger("keydown"),a.trigger("keydown")},1));break;case"list":case"combo":case"enum":i=t.options,"list"==t.type&&(i.selected={}),"enum"==t.type&&(i.selected=[]),s&&(i.selected=s.value),n[0]._w2field||(h=new w2field(t.type,{el:n[0],...i}),s&&null!=s.text&&h.set({id:s.value,text:s.text}));break;case"select":i='';for(let e=0;e'+t+""}else i+='"}n.html(i)}}initSearches(){var s=query(`#w2overlay-${this.name}-search-overlay`);for(let t=0;t{w2utils.isPlainObject(e)&&(i[t]=e.oper)}),r&&r.operator&&(e=r.operator);var l=this.defaultOperator[this.operatorsMap[l.type]],l=(-1==i.indexOf(e)&&(e=l),s.find(`#grid_${this.name}_operator_`+t).val(e),this.initOperator(t),s.find(`#grid_${this.name}_field_`+t)),n=s.find(`#grid_${this.name}_field2_`+t);null!=r&&(Array.isArray(r.value)?["in","not in"].includes(r.operator)?l[0]._w2field.set(r.value):(l.val(r.value[0]).trigger("change"),n.val(r.value[1]).trigger("change")):null!=r.value&&l.val(r.value).trigger("change"))}s.find(".w2ui-grid-search-advanced *[rel=search]").on("keypress",e=>{13==e.keyCode&&(this.search(),w2tooltip.hide(this.name+"-search-overlay"))})}getColumnsHTML(){let h=this,e="",t="";var i,s,l;return this.show.columnHeaders&&(t=0 ",h.columnGroups[e]),h.columnGroups[e].text=h.columnGroups[e].caption);""!=h.columnGroups[h.columnGroups.length-1].text&&h.columnGroups.push({text:""});h.show.lineNumbers&&(t+='
       
      ');h.show.selectColumn&&(t+='
       
      ');h.show.expandColumn&&(t+='
       
      ');let r=0;s+=``,h.show.orderColumn&&(s+='
       
      ');for(let e=0;e",a),a.text=a.caption);let i=0;for(let e=r;e`);var o=w2utils.lang("function"==typeof a.text?a.text(a):a.text);l=``+e+`
      `+`
      `+(o||" ")+"
      "}else{o=w2utils.lang("function"==typeof n.text?n.text(n):n.text);l=``+`
      ${o||" "}
      `+""}a&&a.frozen?t+=l:s+=l}r+=n.span}return t+="",s+=``,[t,s]}(),s=r(!1),e=l[0]+i[0]+s[0],l[1]+i[1]+s[1]):(l=r(!0),e=l[0],l[1])),[e,t];function r(t){let i="",s="",l=(h.show.lineNumbers&&(i+='
      #
      '),h.show.selectColumn&&(i+='
      '+`
      "),h.show.expandColumn&&(i+='
       
      '),0),r=0,n;s+=``,h.show.orderColumn&&(s+='
       
      ');for(let e=0;e ",o),o.text=o.caption),null==o.size&&(o.size="100%"),e==r&&(n=h.columnGroups[l++]||{},r+=n.span),(eh.last.colEnd)&&!o.frozen||o.hidden||!0===n.main&&!t||(a=h.getColumnCellHTML(e),o&&o.frozen?i+=a:s+=a)}return i+='
       
      ',s+='
       
      ',i+="",s+="",[i,s]}}getColumnCellHTML(t){var i=this.columns[t];if(null==i)return"";var e=!this.reorderColumns||this.columnGroups&&this.columnGroups.length?"":" w2ui-reorder-cols-head ";let s="";for(let e=0;e'+(!1!==i.resizable?'
      ':"")+'
      '+(a||" ")+"
      "}columnTooltipShow(e,t){var i=query(this.box).find("#grid_"+this.name+"_column_"+e),e=this.columns[e],s=this.columnTooltip;w2tooltip.show({name:this.name+"-column-tooltip",anchor:i.get(0),html:e.tooltip,position:s})}columnTooltipHide(e,t){w2tooltip.hide(this.name+"-column-tooltip")}getRecordsHTML(){let e=this.records.length;var t="object"!=typeof this.url?this.url:this.url.get,t=((e=0==this.searchData.length||t?e:this.last.searchIds.length)>this.vs_start?this.last.show_extra=this.vs_extra:this.last.show_extra=this.vs_start,query(this.box).find(`#grid_${this.name}_records`));let i=Math.floor((t.get(0)?.clientHeight||0)/this.recordHeight)+this.last.show_extra+1;(!this.fixedBody||i>e)&&(i=e);var s=this.getRecordHTML(-1,0);let l=""+s[0],r="
      "+s[1];l+='',r+='';for(let e=0;e
      ',r+=' ',this.last.range_start=0,this.last.range_end=i,[l,r]}getSummaryHTML(){if(0!==this.summary.length){var s=this.getRecordHTML(-1,0);let t=""+s[0],i="
      "+s[1];for(let e=0;ethis.last.scrollLeft&&null==l&&(l=e),t+s-30>this.last.scrollLeft+n&&null==r&&(r=e),t+=s);null==r&&(r=this.columns.length-1)}if(null!=l&&(l<0&&(l=0),r<0&&(r=0),l==r&&(0this.last.colStart)for(let e=this.last.colStart;er;e--)a.find("#grid_"+this.name+"_columns #grid_"+this.name+"_column_"+e).remove(),a.find("#grid_"+this.name+'_records td[col="'+e+'"]').remove(),a.find("#grid_"+this.name+'_summary td[col="'+e+'"]').remove();if(l=l;s--)this.columns[s]&&(this.columns[s].frozen||this.columns[s].hidden)||(e.after(this.getColumnCellHTML(s)),f.each(e=>{var t=query(e).parent().attr("index");let i='';null!=t&&(i=this.getCellHTML(parseInt(t),s,!1)),query(e).after(i)}),g.each(e=>{var t=query(e).parent().attr("index");let i='';null!=t&&(i=this.getCellHTML(parseInt(t),s,!0)),query(e).after(i)}));if(r>this.last.colEnd)for(let s=this.last.colEnd+1;s<=r;s++)this.columns[s]&&(this.columns[s].frozen||this.columns[s].hidden)||(t.before(this.getColumnCellHTML(s)),m.each(e=>{var t=query(e).parent().attr("index");let i='';null!=t&&(i=this.getCellHTML(parseInt(t),s,!1)),query(e).before(i)}),y.each(e=>{var t=query(e).parent().attr("index")||-1,t=this.getCellHTML(parseInt(t),s,!0);query(e).before(t)}));this.last.colStart=l,this.last.colEnd=r}else{this.last.colStart=l,this.last.colEnd=r;var o=this.getColumnsHTML(),w=this.getRecordsHTML(),c=this.getSummaryHTML(),p=a.find(`#grid_${this.name}_columns`);let e=a.find(`#grid_${this.name}_records`);var b=a.find(`#grid_${this.name}_frecords`);let t=a.find(`#grid_${this.name}_summary`);p.find("tbody").html(o[1]),b.html(w[0]),e.prepend(w[1]),null!=c&&t.html(c[1]),setTimeout(()=>{e.find(":scope > table").filter(":not(table:first-child)").remove(),t[0]&&(t[0].scrollLeft=this.last.scrollLeft)},1)}this.resizeRecords()}let v=this.records.length;if(v>this.total&&-1!==this.total&&(v=this.total),0!==(v=0==this.searchData.length||i?v:this.last.searchIds.length)&&0!==d.length&&0!==d.prop("clientHeight")){v>this.vs_start?this.last.show_extra=this.vs_extra:this.last.show_extra=this.vs_start;let e=Math.round(d.prop("scrollTop")/this.recordHeight+1),t=e+(Math.round(d.prop("clientHeight")/this.recordHeight)-1);if(e>v&&(e=v),t>=v-1&&(t=v),query(this.box).find("#grid_"+this.name+"_footer .w2ui-footer-right").html((this.show.statusRange?w2utils.formatNumber(this.offset+e)+"-"+w2utils.formatNumber(this.offset+t)+(-1!=this.total?" "+w2utils.lang("of")+" "+w2utils.formatNumber(this.total):""):"")+(i&&this.show.statusBuffered?" 
("+w2utils.lang("buffered")+" "+w2utils.formatNumber(v)+(0this.total&&-1!=this.total&&(i=this.total);var x=d.find("#grid_"+this.name+"_rec_top"),_=d.find("#grid_"+this.name+"_rec_bottom"),q=u.find("#grid_"+this.name+"_frec_top"),C=u.find("#grid_"+this.name+"_frec_bottom"),p=(-1!=String(x.next().prop("id")).indexOf("_expanded_row")&&(x.next().remove(),q.next().remove()),this.total>i&&-1!=String(_.prev().prop("id")).indexOf("_expanded_row")&&(_.prev().remove(),C.prev().remove()),parseInt(x.next().attr("line"))),o=parseInt(_.prev().attr("line"));let e,s,l,r,n;if(p=p-this.last.show_extra+2&&1i))break;s.remove(),l.remove()}e=d.find("#grid_"+this.name+"_rec_top").next(),"bottom"==(r=e.attr("line"))&&(r=i);for(let e=parseInt(r)-1;e>=t;e--)this.records[e-1]&&((l=this.records[e-1].w2ui)&&!Array.isArray(l.children)&&(l.expanded=!1),n=this.getRecordHTML(e-1,e),x.after(n[1]),q.after(n[0]))}k(),setTimeout(()=>{this.refreshRanges()},0);b=(t-1)*this.recordHeight;let a=(v-i)*this.recordHeight;function k(){h.markSearch&&(clearTimeout(h.last.marker_timer),h.last.marker_timer=setTimeout(()=>{var t=[];for(let e=0;e{var t=query(h.box).find('td[col="'+e.col+'"]:not(.w2ui-head)');w2utils.marker(t,e.search)})},50))}a<0&&(a=0),x.css("height",b+"px"),q.css("height",b+"px"),_.css("height",a+"px"),C.css("height",a+"px"),this.last.range_start=t,this.last.range_end=i,Math.floor(d.prop("scrollTop")/this.recordHeight)+Math.floor(d.prop("clientHeight")/this.recordHeight)+10>v&&!0!==this.last.pull_more&&(v
      '),h.last.pull_more=!0,h.last.fetch.offset+=h.limit,h.request("load")}).find("td").html(h.autoLoad?'
      ':'
      '+w2utils.lang("Load ${count} more...",{count:h.limit})+"
      "))}}}getRecordHTML(r,n,a){let o="",h="";var d=this.last.selection;let u;if(-1==r){o+='
      ',h+='',this.show.lineNumbers&&(o+=''),this.show.selectColumn&&(o+=''),this.show.expandColumn&&(o+=''),h+='',this.show.orderColumn&&(h+='');for(let e=0;e';t.frozen&&!t.hidden?o+=i:t.hidden||ethis.last.colEnd||(h+=i)}o+='',h+=''}else{var c="object"!=typeof this.url?this.url:this.url.get;if(!0!==a){if(0=this.last.searchIds.length)return"";r=this.last.searchIds[r]}else if(r>=this.records.length)return"";u=this.records[r]}else{if(r>=this.summary.length)return"";u=this.summary[r]}if(!u)return"";null==u.recid&&null!=this.recid&&null!=(c=this.parseField(u,this.recid))&&(u.recid=c);let e=!1,t=(-1!=d.indexes.indexOf(r)&&(e=!0),u.w2ui?u.w2ui.style:""),i=(null!=t&&"string"==typeof t||(t=""),u.w2ui?u.w2ui.class:"");if(null!=i&&"string"==typeof i||(i=""),o+='",h+='",this.show.lineNumbers&&(o+='"),this.show.selectColumn&&(o+='"),this.show.expandColumn){let e="";e=u.w2ui&&!0===u.w2ui.expanded?"-":"+",!u.w2ui||"none"!=u.w2ui.expanded&&Array.isArray(u.w2ui.children)&&u.w2ui.children.length||(e=""),u.w2ui&&"spinner"==u.w2ui.expanded&&(e='
      '),o+='"}h+='',this.show.orderColumn&&(h+='");let s=0,l=0;for(;;){let e=1;var p,f=this.columns[s];if(null==f)break;if(f.hidden)s++,0this.last.colEnd)||f.frozen){if(u.w2ui&&"object"==typeof u.w2ui.colspan){var m=parseInt(u.w2ui.colspan[f.field])||null;if(1=this.columns.length);e++)this.columns[e].hidden&&t++;e=m-t,l=m-1}}var g=this.getCellHTML(r,s,a,e);f.frozen?o+=g:h+=g}s++}}o+='',h+=''}return o+="",h+="",[o,h]}getLineHTML(e){return"
      "+e+"
      "}getCellHTML(i,s,l,e){let r=this,n=this.columns[s];if(null==n)return"";let a=(!0!==l?this.records:this.summary)[i],{value:t,style:o,className:h,attr:d,divAttr:u}=this.getCellValue(i,s,l,!0);var c=-1!==i?this.getCellEditable(i,s):"";let p="max-height: "+parseInt(this.recordHeight)+"px;"+(n.clipboardCopy?"margin-right: 20px":"");var f=!l&&a&&a.w2ui&&a.w2ui.changes&&null!=a.w2ui.changes[n.field],m=this.last.selection;let g=!1,y="";if(-1!=m.indexes.indexOf(i)&&(g=!0),null==e&&(e=a&&a.w2ui&&a.w2ui.colspan&&a.w2ui.colspan[n.field]?a.w2ui.colspan[n.field]:1),0===s&&a&&a.w2ui&&Array.isArray(a.w2ui.children)){let t=0,e=this.get(a.w2ui.parent_recid,!0);for(;;){if(null==e)break;t++;var w=this.records[e].w2ui;if(null==w||null==w.parent_recid)break;e=this.get(w.parent_recid,!0)}if(a.w2ui.parent_recid)for(let e=0;e';var b=0`}if(!0===n.info&&(n.info={}),null!=n.info){let e="w2ui-icon-info",t=("function"==typeof n.info.icon?e=n.info.icon(a,{self:this,index:i,colIndex:s,summary:!!l}):"object"==typeof n.info.icon?e=n.info.icon[this.parseField(a,n.field)]||"":"string"==typeof n.info.icon&&(e=n.info.icon),n.info.style||"");"function"==typeof n.info.style?t=n.info.style(a,{self:this,index:i,colIndex:s,summary:!!l}):"object"==typeof n.info.style?t=n.info.style[this.parseField(a,n.field)]||"":"string"==typeof n.info.style&&(t=n.info.style),y+=``}let v=t,x=(c&&-1!=["checkbox","check"].indexOf(c.type)&&(p+="text-align: center;",v=``,y=""),null==(v=`
      ${y}${String(v)}
      `)&&(v=""),"string"==typeof n.render&&(b=n.render.toLowerCase().split(":"),-1!=["number","int","float","money","currency","percent","size"].indexOf(b[0])&&(o+="text-align: right;")),a&&a.w2ui&&("object"==typeof a.w2ui.style&&("string"==typeof a.w2ui.style[s]&&(o+=a.w2ui.style[s]+";"),"string"==typeof a.w2ui.style[n.field]&&(o+=a.w2ui.style[n.field]+";")),"object"==typeof a.w2ui.class&&("string"==typeof a.w2ui.class[s]&&(h+=a.w2ui.class[s]+" "),"string"==typeof a.w2ui.class[n.field]&&(h+=a.w2ui.class[n.field]+" "))),!1);g&&m.columns[i]?.includes(s)&&(x=!0);let _;return n.clipboardCopy&&(_=''),v='
      ",v=-1===i&&!0===l?'":v}clipboardCopy(e,t,i){var s=(i?this.summary:this.records)[e],l=this.columns[t];let r=l?this.parseField(s,l.field):"";"function"==typeof l.clipboardCopy&&(r=l.clipboardCopy(s,{self:this,index:e,colIndex:t,summary:!!i})),query(this.box).find("#grid_"+this.name+"_focus").text(r).get(0).select(),document.execCommand("copy")}showBubble(s,l,r){var n=this.columns[l].info;if(n){let i="";var a=this.records[s],e=query(this.box).find(`${r?".w2ui-grid-summary":""} #grid_${this.name}_data_${s}_${l} .w2ui-info`);if(this.last.bubbleEl&&w2tooltip.hide(this.name+"-bubble"),this.last.bubbleEl=e,null==n.fields){n.fields=[];for(let e=0;e';else{let e=this.getColumn(h[0]),t=(e=null==e?{field:h[0],caption:h[0]}:e)?this.parseField(a,e.field):"";1n.maxLength&&(t=t.substr(0,n.maxLength)+"..."),i+="")}}i+="
      "+(!0!==a?this.getLineHTML(n,u):"")+"'+(!0===a||u.w2ui&&!0===u.w2ui.hideCheckBox?"":'
      ')+"
      '+(!0!==a?`
      ${e}
      `:"")+"
      '+(!0!==a?'
       
      ':"")+"
      "+v+(_&&w2utils.stripTags(v)?_:"")+"
      "+e.text+""+((0===t?"0":t)||"")+"
      "}else if(w2utils.isPlainObject(t)){for(var d in i='',t){var u=t[d];if(""==u||"-"==u||"--"==u||"---"==u)i+='';else{var c=String(u).split(":");let e=this.getColumn(c[0]),t=(e=null==e?{field:c[0],caption:c[0]}:e)?this.parseField(a,e.field):"";1n.maxLength&&(t=t.substr(0,n.maxLength)+"..."),i+="")}}i+="
      "+d+""+((0===t?"0":t)||"")+"
      "}return w2tooltip.show(w2utils.extend({name:this.name+"-bubble",html:i,anchor:e.get(0),position:"top|bottom",class:"w2ui-info-bubble",style:"",hideOn:["doc-click"]},n.options??{})).hide(()=>[this.last.bubbleEl=null])}}getCellEditable(e,t){var i=this.columns[t],s=this.records[e];if(!s||!i)return null;let l=s.w2ui?s.w2ui.editable:null;return!1===l?null:(null!=l&&!0!==l||"function"==typeof(l=i&&0 '}status(i){if(null!=i)query(this.box).find(`#grid_${this.name}_footer`).find(".w2ui-footer-left").html(i);else{let t="";i=this.getSelection();if(0{query(this.box).find("#grid_"+this.name+"_empty_msg").remove(),w2utils.lock(...i)},10)}unlock(e){setTimeout(()=>{query(this.box).find(".w2ui-message").hasClass("w2ui-closing")||w2utils.unlock(this.box,e)},25)}stateSave(e){var t={columns:[],show:w2utils.clone(this.show),last:{search:this.last.search,multi:this.last.multi,logic:this.last.logic,label:this.last.label,field:this.last.field,scrollTop:this.last.scrollTop,scrollLeft:this.last.scrollLeft},sortData:[],searchData:[]};let l;for(let e=0;e{this.stateColProps[e]&&(l=void 0!==i[e]?i[e]:this.colTemplate[e]||null,s[e]=l)}),t.columns.push(s)}for(let e=0;e{s||(0=this.columns.length)return null==(e=this.nextRow(e))?e:this.nextCell(e,-1,i);var s=this.records[e].w2ui,l=this.columns[t],s=s&&s.colspan&&!isNaN(s.colspan[l.field])?parseInt(s.colspan[l.field]):1;if(null==l)return null;if(l&&l.hidden||0===s)return this.nextCell(e,t,i);if(i){l=this.getCellEditable(e,t);if(null==l||-1!=["checkbox","check"].indexOf(l.type))return this.nextCell(e,t,i)}return{index:e,colIndex:t}}prevCell(e,t,i){t-=1;if(t<0)return null==(e=this.prevRow(e))?e:this.prevCell(e,this.columns.length,i);if(t<0)return null;var s=this.records[e].w2ui,l=this.columns[t],s=s&&s.colspan&&!isNaN(s.colspan[l.field])?parseInt(s.colspan[l.field]):1;if(null==l)return null;if(l&&l.hidden||0===s)return this.prevCell(e,t,i);if(i){l=this.getCellEditable(e,t);if(null==l||-1!=["checkbox","check"].indexOf(l.type))return this.prevCell(e,t,i)}return{index:e,colIndex:t}}nextRow(e,t,i){var s=this.last.searchIds;let l=null;if(-1==(i=null==i?1:i))return this.records.length-1;if(e+ithis.records.length)break;e+=i}var r=this.records[e].w2ui,n=this.columns[t],r=r&&r.colspan&&null!=n&&!isNaN(r.colspan[n.field])?parseInt(r.colspan[n.field]):1;l=0===r?this.nextRow(e,t,i):e}return l}prevRow(e,t,i){var s=this.last.searchIds;let l=null;if(-1==(i=null==i?1:i))return 0;if(0<=e-i&&0===s.length||0s[0]){if(e-=i,0{-1==i.indexOf(e)&&-1!=["label","attr","style","text","span","page","column","anchor","group","groupStyle","groupTitleStyle","groupCollapsible"].indexOf(e)&&(t.html[e]=t[e],delete t[e])}),t}function h(t,i){let s=["style","html"];Object.keys(t).forEach(e=>{-1==s.indexOf(e)&&-1!=["span","column","attr","text","label"].indexOf(e)&&t[e]&&!i.html[e]&&(i.html[e]=t[e])})}r=[],Object.keys(e).forEach(i=>{let s=e[i];if("group"==s.type){if(s.text=i,w2utils.isPlainObject(s.fields)){let i=s.fields;s.fields=[],Object.keys(i).forEach(e=>{let t=i[e];t.field=e,s.fields.push(o(t))})}r.push(s)}else if("tab"==s.type){let e={id:i,text:i},t=(s.style&&(e.style=s.style),a.push(e),l(s.fields).fields);t.forEach(e=>{e.html=e.html||{},e.html.page=a.length-1,h(s,e)}),r.push(...t)}else s.field=i,r.push(o(s))})}r.forEach(s=>{if("group"==s.type){let i={group:s.text||"",groupStyle:s.style||"",groupTitleStyle:s.titleStyle||"",groupCollapsible:!0===s.collapsible};Array.isArray(s.fields)&&s.fields.forEach(e=>{let 
t=w2utils.clone(e);null==t.html&&(t.html={}),w2utils.extend(t.html,i),Array("span","column","attr","label","page").forEach(e=>{null==t.html[e]&&null!=s[e]&&(t.html[e]=s[e])}),null==t.field&&null!=t.name&&(console.log("NOTICE: form field.name property is deprecated, please use field.field. Field ->",s),t.field=t.name),n.push(t)})}else{let e=w2utils.clone(s);null==e.field&&null!=e.name&&(console.log("NOTICE: form field.name property is deprecated, please use field.field. Field ->",s),e.field=e.name),n.push(e)}});return{fields:n,tabs:a}}(r),this.fields=e.fields,!a&&0e.text()).then(e=>{this.formHTML=e,this.isGenerated=!0,this.box&&this.render(this.box)}):this.formURL||this.formHTML?this.formHTML&&(this.isGenerated=!0):(this.formHTML=this.generateHTML(),this.isGenerated=!0),"string"==typeof this.box&&(this.box=query(this.box).get(0)),this.box&&this.render(this.box)}get(t,i){if(0===arguments.length){var s=[];for(let e=0;ee[t],s)}catch(e){}return e}return this.record[t]}setValue(e,l){if((""===l||null==l||Array.isArray(l)&&0===l.length||w2utils.isPlainObject(l)&&0==Object.keys(l).length)&&(l=null),!this.nestedFields)return this.record[e]=l,!0;try{let s=this.record;return String(e).split(".").map((e,t,i)=>{i.length-1!==t?s=s[e]||(s[e]={},s[e]):s[e]=l}),!0}catch(e){return!1}}getFieldValue(e){let s=this.get(e);if(null!=s){var l=s.el;let t=this.getValue(e);e=this.getValue(e,!0);let i=l.value;["int","float","percent","money","currency"].includes(s.type)&&(i=s.w2field.clean(i)),["radio"].includes(s.type)&&(r=query(l).closest("div").find("input:checked").get(0),i=r?s.options.items[query(r).data("index")].id:null),["toggle","checkbox"].includes(s.type)&&(i=l.checked),-1!==["check","checks"].indexOf(s.type)&&(i=[],0<(r=query(l).closest("div").find("input:checked")).length&&r.each(e=>{e=s.options.items[query(e).data("index")];i.push(e.id)}),Array.isArray(t)||(t=[]));var r=l._w2field?.selected;if(["list","enum","file"].includes(s.type)&&r){var n=r,a=t;if(Array.isArray(n)){i=[];for(let e=0;e{var t=query(e).find(".w2ui-map.key").val(),e=query(e).find(".w2ui-map.value").val();"map"==s.type?i[t]=e:i.push(e)})),{current:i,previous:t,original:e}}}setFieldValue(e,r){let n=this.get(e);if(null!=n){var s=n.el;switch(n.type){case"toggle":case"checkbox":s.checked=!!r;break;case"radio":{r=r?.id??r;let i=query(s).closest("div").find("input");n.options.items.forEach((e,t)=>{e.id===r&&i.filter(`[data-index="${t}"]`).prop("checked",!0)});break}case"check":case"checks":{r=(r=Array.isArray(r)?r:null!=r?[r]:[]).map(e=>e?.id??e);let i=query(s).closest("div").find("input");n.options.items.forEach((e,t)=>{i.filter(`[data-index="${t}"]`).prop("checked",!!r.includes(e.id))});break}case"list":case"combo":let t=r;null==t?.id&&Array.isArray(n.options?.items)&&n.options.items.forEach(e=>{e.id===r&&(t=e)}),t!=r&&this.setValue(n.name,t),"list"==n.type?(n.w2field.selected=t,n.w2field.refresh()):n.el.value=t?.text??r;break;case"enum":case"file":{let s=[...r=Array.isArray(r)?r:null!=r?[r]:[]],l=!1;s.forEach((t,i)=>{null==t?.id&&Array.isArray(n.options.items)&&n.options.items.forEach(e=>{e.id==t&&(s[i]=e,l=!0)})}),l&&this.setValue(n.name,s),n.w2field.selected=s,n.w2field.refresh();break}case"map":case"array":"map"!=n.type||null!=r&&w2utils.isPlainObject(r)||(this.setValue(n.field,{}),r=this.getValue(n.field)),"array"!=n.type||null!=r&&Array.isArray(r)||(this.setValue(n.field,[]),r=this.getValue(n.field));var 
i=query(n.el).parent().find(".w2ui-map-container");n.el.mapRefresh(r,i);break;case"div":case"custom":query(s).html(r);break;case"html":case"empty":break;default:s.value=r??""}}}show(){var t=[];for(let e=0;e{!function(e){let t=!0;return e.each(e=>{"none"!=e.style.display&&(t=!1)}),t}(query(e).find(".w2ui-field"))?query(e).show():query(e).hide()})}change(){Array.from(arguments).forEach(e=>{e=this.get(e);e.$el&&e.$el.change()})}reload(e){return("object"!=typeof this.url?this.url:this.url.get)&&null!=this.recid?this.request(e):("function"==typeof e&&e(),new Promise(e=>{e()}))}clear(){0!=arguments.length?Array.from(arguments).forEach(e=>{let s=this.record;String(e).split(".").map((e,t,i)=>{i.length-1!==t?s=s[e]:delete s[e]}),this.refresh(e)}):(this.recid=null,this.record={},this.original=null,this.refresh(),this.hideErrors())}error(e){var t=this.trigger("error",{target:this.name,message:e,fetchCtrl:this.last.fetchCtrl,fetchOptions:this.last.fetchOptions});!0!==t.isCancelled&&(setTimeout(()=>{this.message(e)},1),t.finish())}message(e){return w2utils.message({owner:this,box:this.box,after:".w2ui-form-header"},e)}confirm(e){return w2utils.confirm({owner:this,box:this.box,after:".w2ui-form-header"},e)}validate(e){null==e&&(e=!0);var t=[];for(let e=0;e{var i=w2utils.extend({anchorClass:"w2ui-error",class:"w2ui-light",position:"right|left",hideOn:["input"]},t.options);if(null!=t.field){let e=t.field.el;"radio"===t.field.type?e=query(t.field.el).closest("div").get(0):["enum","file"].includes(t.field.type),w2tooltip.show(w2utils.extend({anchor:e,name:`${this.name}-${t.field.field}-error`,html:t.error},i))}}),query(e[0].field.$el).parents(".w2ui-page").off(".hideErrors").on("scroll.hideErrors",e=>{this.hideErrors()}))}hideErrors(){this.fields.forEach(e=>{w2tooltip.hide(`${this.name}-${e.field}-error`)})}getChanges(){let e={};return e=null!=this.original&&"object"==typeof this.original&&0!==Object.keys(this.record).length?function e(t,i,s){if(Array.isArray(t)&&Array.isArray(i))for(;t.length{if(-1!=["list","combo","enum"].indexOf(e.type)){var t={nestedFields:!0,record:s};let i=this.getValue.call(t,e.field);w2utils.isPlainObject(i)&&null!=i.id&&this.setValue.call(t,e.field,i.id),Array.isArray(i)&&i.forEach((e,t)=>{w2utils.isPlainObject(e)&&e.id&&(i[t]=e.id)})}var i;"map"==e.type&&(t={nestedFields:!0,record:s},(t=this.getValue.call(t,e.field))._order&&delete t._order),"file"==e.type&&(t={nestedFields:!0,record:s},(i=this.getValue.call(t,e.field)??[]).forEach(e=>{delete e.file,delete e.modified}),this.setValue.call(t,e.field,i))}),!0===e&&Object.keys(s).forEach(e=>{this.get(e)||delete s[e]}),s}prepareParams(i,e){var t=this.dataType??w2utils.settings.dataType;let s=e.body;switch(t){case"HTTPJSON":s={request:s},l();break;case"HTTP":l();break;case"RESTFULL":"POST"==e.method?e.headers["Content-Type"]="application/json":l();break;case"JSON":"GET"==e.method?(s={request:s},l()):(e.headers["Content-Type"]="application/json",e.method="POST")}return e.body="string"==typeof e.body?e.body:JSON.stringify(e.body),e;function l(){Object.keys(s).forEach(e=>{let t=s[e];"object"==typeof t&&(t=JSON.stringify(t)),i.searchParams.append(e,t)}),delete e.body}}request(e,s){let l=this,r,i;var n=new Promise((e,t)=>{r=e,i=t});if("function"==typeof e&&(s=e,e=null),null==e&&(e={}),this.url&&("object"!=typeof this.url||this.url.get)){var 
a={action:"get"},e=(a.recid=this.recid,a.name=this.name,w2utils.extend(a,this.postData),w2utils.extend(a,e),this.trigger("request",{target:this.name,url:this.url,httpMethod:"GET",postData:a,httpHeaders:this.httpHeaders}));if(!0!==e.isCancelled){this.record={},this.original=null,this.lock(w2utils.lang(this.msgRefresh));let t=e.detail.url;if("object"==typeof t&&t.get&&(t=t.get),this.last.fetchCtrl)try{this.last.fetchCtrl.abort()}catch(e){}if(0!=Object.keys(this.routeData).length){var o=w2utils.parseRoute(t);if(0{200!=i?.status?i&&h(i):i.json().catch(h).then(e=>{var t=l.trigger("load",{target:l.name,fetchCtrl:this.last.fetchCtrl,fetchOptions:this.last.fetchOptions,data:i});!0!==t.isCancelled&&(!0===(e=e.record?e:{error:!1,record:e}).error?l.error(w2utils.lang(e.message)):l.record=w2utils.clone(e.record),l.unlock(),t.finish(),l.refresh(),l.setFocus(),"function"==typeof s&&s(e),r(e))})}),e.finish(),n;function h(e){var t;"AbortError"!==e.name&&(l.unlock(),!0!==(t=l.trigger("error",{response:e,fetchCtrl:l.last.fetchCtrl,fetchOptions:l.last.fetchOptions})).isCancelled&&(e.status&&200!=e.status?l.error(e.status+": "+e.statusText):(console.log("ERROR: Server request failed.",e,". ","Expected Response:",{error:!1,record:{field1:1,field2:"item"}},"OR:",{error:!0,message:"Error description"}),l.error(String(e))),t.finish(),i(e)))}}}}submit(e,t){return this.save(e,t)}save(e,i){let s=this,l,r;var n=new Promise((e,t)=>{l=e,r=t}),a=("function"==typeof e&&(i=e,e=null),s.validate(!0));if(0===a.length)if(null==e&&(e={}),!s.url||"object"==typeof s.url&&!s.url.save)console.log("ERROR: Form cannot be saved because no url is defined.");else{s.lock(w2utils.lang(s.msgSaving)+' ');a={action:"save"},e=(a.recid=s.recid,a.name=s.name,w2utils.extend(a,s.postData),w2utils.extend(a,e),a.record=w2utils.clone(s.record),s.trigger("submit",{target:s.name,url:s.url,httpMethod:"POST",postData:a,httpHeaders:s.httpHeaders}));if(!0!==e.isCancelled){let t=e.detail.url;if("object"==typeof t&&t.save&&(t=t.save),s.last.fetchCtrl&&s.last.fetchCtrl.abort(),0{s.unlock(),200!=e?.status?h(e??{}):e.json().catch(h).then(e=>{var t=s.trigger("save",{target:s.name,fetchCtrl:this.last.fetchCtrl,fetchOptions:this.last.fetchOptions,data:e});!0!==t.isCancelled&&(!0===e.error?s.error(w2utils.lang(e.message)):s.original=null,t.finish(),s.refresh(),"function"==typeof i&&i(e),l(e))})}),e.finish(),n;function h(e){var t;"AbortError"!==e?.name&&(s.unlock(),!0!==(t=s.trigger("error",{response:e,fetchCtrl:s.last.fetchCtrl,fetchOptions:s.last.fetchOptions})).isCancelled&&(e.status&&200!=e.status?s.error(e.status+": "+e.statusText):(console.log("ERROR: Server request failed.",e,". 
","Expected Response:",{error:!1,record:{field1:1,field2:"item"}},"OR:",{error:!0,message:"Error description"}),s.error(String(e))),t.finish(),r()))}}}}lock(e,t){var i=Array.from(arguments);i.unshift(this.box),w2utils.lock(...i)}unlock(e){var t=this.box;w2utils.unlock(t,e)}lockPage(e,t,i){e=query(this.box).find(".page-"+e);return!!e.length&&(w2utils.lock(e,t,i),!0)}unlockPage(e,t){e=query(this.box).find(".page-"+e);return!!e.length&&(w2utils.unlock(e,t),!0)}goto(e){this.page!==e&&(null!=e&&(this.page=e),!0===query(this.box).data("autoSize")&&(query(this.box).get(0).clientHeight=0),this.refresh())}generateHTML(){let s=[],t="",l,r,n,a;for(let e=0;e",h),h.html.label=h.html.caption),null==h.html.label&&(h.html.label=h.field),h.html=w2utils.extend({label:"",span:6,attr:"",text:"",style:"",page:0,column:0},h.html),null==l&&(l=h.html.page),null==r&&(r=h.html.column);let i=``;switch(h.type){case"pass":case"password":i=i.replace('type="text"','type="password"');break;case"checkbox":i=` - `;break;case"check":case"checks":{null==h.options.items&&null!=h.html.items&&(h.options.items=h.html.items);let t=h.options.items;i="",0<(t=Array.isArray(t)?t:[]).length&&(t=w2utils.normMenu.call(this,t,h));for(let e=0;e - -  ${t[e].text} - -
      `;break}case"radio":{i="",null==h.options.items&&null!=h.html.items&&(h.options.items=h.html.items);let t=h.options.items;0<(t=Array.isArray(t)?t:[]).length&&(t=w2utils.normMenu.call(this,t,h));for(let e=0;e - -  ${t[e].text} - -
      `;break}case"select":{i=`";break}case"textarea":i=``;break;case"toggle":i=` -
      `;break;case"map":case"array":h.html.key=h.html.key||{},h.html.value=h.html.value||{},h.html.tabindex_str=o,i=''+(h.html.text||"")+'
      ';break;case"div":case"custom":i='
      '+(h&&h.html&&h.html.html?h.html.html:"")+"
      ";break;case"html":case"empty":i=h&&h.html?(h.html.html||"")+(h.html.text||""):""}if(""!==t&&(l!=h.html.page||r!=h.html.column||h.html.group&&t!=h.html.group)&&(s[l][r]+="\n \n ",t=""),h.html.group&&t!=h.html.group){let e="";h.html.groupCollapsible&&(e=''),n+='\n
      \n
      "+e+w2utils.lang(h.html.group)+'
      \n
      ',t=h.html.group}if(null==h.html.anchor){let e=null!=h.html.span?"w2ui-span"+h.html.span:"",t=""+w2utils.lang("checkbox"!=h.type?h.html.label:h.html.text)+"";h.html.label||(t=""),n+='\n
      \n '+t+("empty"===h.type?i:"\n
      "+i+("array"!=h.type&&"map"!=h.type?w2utils.lang("checkbox"!=h.type?h.html.text:""):"")+"
      ")+"\n
      "}else s[h.html.page].anchors=s[h.html.page].anchors||{},s[h.html.page].anchors[h.html.anchor]='
      '+("empty"===h.type?i:"
      "+w2utils.lang("checkbox"!=h.type?h.html.label:h.html.text,!0)+i+w2utils.lang("checkbox"!=h.type?h.html.text:"")+"
      ")+"
      ";null==s[h.html.page]&&(s[h.html.page]={}),null==s[h.html.page][h.html.column]&&(s[h.html.page][h.html.column]=""),s[h.html.page][h.html.column]+=n,l=h.html.page,r=h.html.column}if(""!==t&&(s[l][r]+="\n
      \n
      "),this.tabs.tabs)for(let e=0;e",d),d.text=d.caption),d.text&&(u.text=d.text),d.style&&(u.style=d.style),d.class&&(u.class=d.class)):(u.text=i,-1!==["save","update","create"].indexOf(i.toLowerCase())?u.class="w2ui-btn-blue":u.class=""),e+='\n ",a++}e+="\n"}n="";for(let i=0;i',!s[i])return console.log(`ERROR: Page ${i} does not exist`),!1;s[i].before&&(n+=s[i].before),n+='
      ',Object.keys(s[i]).sort().forEach((e,t)=>{e==parseInt(e)&&(n+='
      '+(s[i][e]||"")+"\n
      ")}),n+="\n
      ",s[i].after&&(n+=s[i].after),n+="\n",s[i].anchors&&Object.keys(s[i].anchors).forEach((e,t)=>{n=n.replace(e,s[i].anchors[e])})}return n+=e}toggleGroup(e,t){var i,e=query(this.box).find('.w2ui-group-title[data-group="'+w2utils.base64encode(e)+'"]');0!==e.length&&(i=query(e.prop("nextElementSibling")),(t=void 0===t?"none"==i.css("display"):t)?(i.show(),e.find("span").addClass("w2ui-icon-collapse").removeClass("w2ui-icon-expand")):(i.hide(),e.find("span").addClass("w2ui-icon-expand").removeClass("w2ui-icon-collapse")))}action(e,t){var i=this.actions[e];let s=i;w2utils.isPlainObject(i)&&i.onClick&&(s=i.onClick);e=this.trigger("action",{target:e,action:i,originalEvent:t});!0!==e.isCancelled&&("function"==typeof s&&s.call(this,t),e.finish())}resize(){let d=this;var e=this.trigger("resize",{target:this.name});if(!0!==e.isCancelled){let l=query(this.box).find(":scope > div.w2ui-form-box"),r=query(this.box).find(":scope > div .w2ui-form-header"),n=query(this.box).find(":scope > div .w2ui-form-toolbar"),a=query(this.box).find(":scope > div .w2ui-form-tabs"),o=query(this.box).find(":scope > div .w2ui-page");var t=query(this.box).find(":scope > div .w2ui-page.page-"+this.page+" > div");let h=query(this.box).find(":scope > div .w2ui-buttons");var{headerHeight:i,tbHeight:s,tabsHeight:u}=c();function c(){var e=d.box.getBoundingClientRect(),t=""!==d.header?w2utils.getSize(r,"height"):0,i=Array.isArray(d.toolbar?.items)&&0("string"!=typeof e&&console.log("ERROR: Arguments in refresh functions should be field names"),this.get(e,!0))).filter((e,t)=>null!=e):(query(this.box).find("input, textarea, select").each(e=>{var t=null!=query(e).attr("name")?query(e).attr("name"):query(e).attr("id"),i=this.get(t);if(i){var s=query(e).closest(".w2ui-page");if(0{query(e).off("click").on("click",function(e){let t=this.value;this.id&&(t=this.id),this.name&&(t=this.name),c.action(t,e)})});for(let e=0;e{t+=``}),s.$el.html(t)}this.W2FIELD_TYPES.includes(s.type)&&(s.w2field=s.w2field??new w2field(w2utils.extend({},s.options,{type:s.type})),s.w2field.render(s.el)),["map","array"].includes(s.type)&&!function(d){let u;d.el.mapAdd=function(e,t,i){var s=(e.disabled?" readOnly ":"")+(e.html.tabindex_str||""),i=` -
      - ${"map"==e.type?` - ${e.html.key.text||""} - `:""} - - ${e.html.value.text||""} -
      `;t.append(i)},d.el.mapRefresh=function(l,r){let n,a,o;var h;"map"==d.type&&(null==(l=w2utils.isPlainObject(l)?l:{})._order&&(l._order=Object.keys(l)),n=l._order),"array"==d.type&&(Array.isArray(l)||(l=[]),n=l.map((e,t)=>t));for(let e=r.find(".w2ui-map-field").length-1;e>=n.length;e--)r.find(`div[data-index='${e}']`).remove();for(let s=0;se.key==t)).length&&(i=h[0].value),a.val(t),o.val(i),!0!==d.disabled&&!1!==d.disabled||(a.prop("readOnly",!!d.disabled),o.prop("readOnly",!!d.disabled))}var e=n.length,t=r.find(`div[data-index='${e}']`),e=(0!==t.length||a&&""==a.val()&&""==o.val()||a&&(!0===a.prop("readOnly")||!0===a.prop("disabled"))||d.el.mapAdd(d,r,e),!0!==d.disabled&&!1!==d.disabled||(t.find(".key").prop("readOnly",!!d.disabled),t.find(".value").prop("readOnly",!!d.disabled)),query(d.el).get(0)?.nextSibling);query(e).find("input.w2ui-map").off(".mapChange").on("keyup.mapChange",function(e){var t=query(e.target).closest(".w2ui-map-field"),i=t.get(0).nextElementSibling,t=t.get(0).previousElementSibling,s=(13==e.keyCode&&((s=u??i)instanceof HTMLElement&&(0<(s=query(s).find("input")).length&&s.get(0).focus()),u=void 0),query(e.target).hasClass("key")?"key":"value");38==e.keyCode&&t&&(query(t).find("input."+s).get(0).select(),e.preventDefault()),40==e.keyCode&&i&&(query(i).find("input."+s).get(0).select(),e.preventDefault())}).on("keydown.mapChange",function(e){38!=e.keyCode&&40!=e.keyCode||e.preventDefault()}).on("input.mapChange",function(e){var e=query(e.target).closest("div"),t=e.data("index"),i=e.get(0).nextElementSibling;if(""==e.find("input").val()||i){if(""==e.find("input").val()&&i){let t=!0;query(i).find("input").each(e=>{""!=e.value&&(t=!1)}),t&&query(i).remove()}}else d.el.mapAdd(d,r,parseInt(t)+1)}).on("change.mapChange",function(e){null==c.original&&(0{t._order.push(e.value)}),c.trigger("change",{target:d.field,field:d.field,originalEvent:e,value:{current:t,previous:i,original:s}}));!0!==l.isCancelled&&("map"==d.type&&(t._order=t._order.filter(e=>""!==e),delete t[""]),"array"==d.type&&(t=t.filter(e=>""!==e)),""==query(e.target).parent().find("input").val()&&(u=e.target),c.setValue(d.field,t),d.el.mapRefresh(t,r),l.finish())})}}(s),this.setFieldValue(s.field,this.getValue(s.name)),s.$el.trigger("change")}}return t.finish(),this.resize(),Date.now()-e}}}render(e){var t=Date.now();let i=this;"string"==typeof e&&(e=query(e).get(0));var s=this.trigger("render",{target:this.name,box:e??this.box});if(!0!==s.isCancelled&&(null!=e&&(0'+(""!==this.header?'
      '+w2utils.lang(this.header)+"
      ":"")+' '+this.formHTML+"",e=(query(this.box).attr("name",this.name).addClass("w2ui-reset w2ui-form").html(e),0this.refresh()):this.refresh(),this.last.observeResize=new ResizeObserver(()=>{this.resize()}),this.last.observeResize.observe(this.box),-1!=this.focus){let e=0,t=()=>{0 input, select, textarea, div > label:nth-child(1) > [type=radio]").filter(":not(.file-input)");null==i[e].offsetParent&&i.length>=e;)e++;i[e]&&(t=query(i[e]))}else"string"==typeof e&&(t=query(this.box).find(`[name='${e}']`));return 0 `,arrow:!1,advanced:null,transparent:!0},this.options=w2utils.extend({},e,t),t=this.options;break;case"date":e={format:w2utils.settings.dateFormat,keyboard:!0,autoCorrect:!0,start:null,end:null,blockDates:[],blockWeekdays:[],colored:{},btnNow:!0},this.options=w2utils.extend({type:"date"},e,t),t=this.options,null==query(this.el).attr("placeholder")&&query(this.el).attr("placeholder",t.format);break;case"time":e={format:w2utils.settings.timeFormat,keyboard:!0,autoCorrect:!0,start:null,end:null,btnNow:!0,noMinutes:!1},this.options=w2utils.extend({type:"time"},e,t),t=this.options,null==query(this.el).attr("placeholder")&&query(this.el).attr("placeholder",t.format);break;case"datetime":e={format:w2utils.settings.dateFormat+"|"+w2utils.settings.timeFormat,keyboard:!0,autoCorrect:!0,start:null,end:null,startTime:null,endTime:null,blockDates:[],blockWeekdays:[],colored:{},btnNow:!0,noMinutes:!1},this.options=w2utils.extend({type:"datetime"},e,t),t=this.options,null==query(this.el).attr("placeholder")&&query(this.el).attr("placeholder",t.placeholder||t.format);break;case"list":case"combo":e={items:[],selected:{},url:null,recId:null,recText:null,method:null,interval:350,postData:{},minLength:1,cacheMax:250,maxDropHeight:350,maxDropWidth:null,minDropWidth:null,match:"begins",icon:null,iconStyle:"",align:"both",altRows:!0,onSearch:null,onRequest:null,onLoad:null,onError:null,renderDrop:null,compare:null,filter:!0,hideSelected:!1,prefix:"",suffix:"",openOnFocus:!1,markSearch:!1},"function"==typeof 
t.items&&(t._items_fun=t.items),t.items=w2utils.normMenu.call(this,t.items),"list"===this.type&&(query(this.el).addClass("w2ui-select"),!w2utils.isPlainObject(t.selected)&&Array.isArray(t.items)&&t.items.forEach(e=>{e&&e.id===t.selected&&(t.selected=w2utils.clone(e))})),t=w2utils.extend({},e,t),this.options=t,w2utils.isPlainObject(t.selected)||(t.selected={}),this.selected=t.selected,query(this.el).attr("autocapitalize","off").attr("autocomplete","off").attr("autocorrect","off").attr("spellcheck","false"),null!=t.selected.text&&query(this.el).val(t.selected.text);break;case"enum":e={items:[],selected:[],max:0,url:null,recId:null,recText:null,interval:350,method:null,postData:{},minLength:1,cacheMax:250,maxItemWidth:250,maxDropHeight:350,maxDropWidth:null,match:"contains",align:"",altRows:!0,openOnFocus:!1,markSearch:!1,renderDrop:null,renderItem:null,compare:null,filter:!0,hideSelected:!0,style:"",onSearch:null,onRequest:null,onLoad:null,onError:null,onClick:null,onAdd:null,onNew:null,onRemove:null,onMouseEnter:null,onMouseLeave:null,onScroll:null},"function"==typeof(t=w2utils.extend({},e,t,{suffix:""})).items&&(t._items_fun=t.items),t.items=w2utils.normMenu.call(this,t.items),t.selected=w2utils.normMenu.call(this,t.selected),this.options=t,Array.isArray(t.selected)||(t.selected=[]),this.selected=t.selected;break;case"file":e={selected:[],max:0,maxSize:0,maxFileSize:0,maxItemWidth:250,maxDropHeight:350,maxDropWidth:null,readContent:!0,silent:!0,align:"both",altRows:!0,renderItem:null,style:"",onClick:null,onAdd:null,onRemove:null,onMouseEnter:null,onMouseLeave:null},t=w2utils.extend({},e,t),this.options=t,Array.isArray(t.selected)||(t.selected=[]),this.selected=t.selected,null==query(this.el).attr("placeholder")&&query(this.el).attr("placeholder",w2utils.lang("Attach files by dragging and dropping or Click to Select"))}query(this.el).css("box-sizing","border-box").addClass("w2field w2ui-input").off(".w2field").on("change.w2field",e=>{this.change(e)}).on("click.w2field",e=>{this.click(e)}).on("focus.w2field",e=>{this.focus(e)}).on("blur.w2field",e=>{"list"!==this.type&&this.blur(e)}).on("keydown.w2field",e=>{this.keyDown(e)}).on("keyup.w2field",e=>{this.keyUp(e)}),this.addPrefix(),this.addSuffix(),this.addSearch(),this.addMultiSearch(),this.change(new Event("change"))}else console.log("ERROR: w2field could only be applied to INPUT or TEXTAREA.",this.el)}get(){let e;return e=-1!==["list","enum","file"].indexOf(this.type)?this.selected:query(this.el).val()}set(e,t){-1!==["list","enum","file"].indexOf(this.type)?("list"!==this.type&&t?(Array.isArray(this.selected)||(this.selected=[]),this.selected.push(e),(t=w2menu.get(this.el.id+"_menu"))&&(t.options.selected=this.selected)):(null==e&&(e=[]),t="enum"!==this.type||Array.isArray(e)?e:[e],this.selected=t),query(this.el).trigger("input").trigger("change"),this.refresh()):query(this.el).val(e)}setIndex(e,t){if(-1!==["list","enum"].indexOf(this.type)){var i=this.options.items;if(i&&i[e])return"list"==this.type&&(this.selected=i[e]),"enum"==this.type&&(t||(this.selected=[]),this.selected.push(i[e])),(t=w2menu.get(this.el.id+"_menu"))&&(t.options.selected=this.selected),query(this.el).trigger("input").trigger("change"),this.refresh(),!0}return!1}refresh(){let s=this.options;var e=Date.now(),t=getComputedStyle(this.el);if("list"==this.type){if(query(this.el).parent().css("white-space","nowrap"),this.helpers.prefix&&this.helpers.prefix.hide(),!this.helpers.search)return;null==this.selected&&s.icon?s.prefix=` - - `:s.prefix="",this.addPrefix();let 
e=query(this.helpers.search_focus);var i=query(e[0].previousElementSibling);e.css({outline:"none"}),""===e.val()?(e.css("opacity",0),i.css("opacity",0),this.selected?.id?(n=this.selected.text,r=this.findItemIndex(s.items,this.selected.id),null!=n&&query(this.el).val(w2utils.lang(n)).data({selected:n,selectedIndex:r[0]})):(this.el.value="",query(this.el).removeData("selected selectedIndex"))):(e.css("opacity",1),i.css("opacity",1),query(this.el).val(""),setTimeout(()=>{this.helpers.prefix&&this.helpers.prefix.hide(),s.icon?(e.css("margin-left","17px"),query(this.helpers.search).find(".w2ui-icon-search").addClass("show-search")):(e.css("margin-left","0px"),query(this.helpers.search).find(".w2ui-icon-search").removeClass("show-search"))},1)),query(this.el).prop("readonly")||query(this.el).prop("disabled")?setTimeout(()=>{this.helpers.prefix&&query(this.helpers.prefix).css("opacity","0.6"),this.helpers.suffix&&query(this.helpers.suffix).css("opacity","0.6")},1):setTimeout(()=>{this.helpers.prefix&&query(this.helpers.prefix).css("opacity","1"),this.helpers.suffix&&query(this.helpers.suffix).css("opacity","1")},1)}let l=this.helpers.multi;if(["enum","file"].includes(this.type)&&l){let i="";Array.isArray(this.selected)&&this.selected.forEach((e,t)=>{null!=e&&(i+=` -
      - ${"function"==typeof s.renderItem?s.renderItem(e,t,`
        
      `):` - ${e.icon?``:""} -
        
      - ${("enum"===this.type?e.text:e.name)??e.id??e} - ${e.size?` - ${w2utils.formatSize(e.size)}`:""} - `} -
      `)});var r,n=l.find(".w2ui-multi-items");s.style&&l.attr("style",l.attr("style")+";"+s.style),query(this.el).css("z-index","-1"),query(this.el).prop("readonly")||query(this.el).prop("disabled")?setTimeout(()=>{l[0].scrollTop=0,l.addClass("w2ui-readonly").find(".li-item").css("opacity","0.9").parent().find(".li-search").hide().find("input").prop("readonly",!0).closest(".w2ui-multi-items").find(".w2ui-list-remove").hide()},1):setTimeout(()=>{l.removeClass("w2ui-readonly").find(".li-item").css("opacity","1").parent().find(".li-search").show().find("input").prop("readonly",!1).closest(".w2ui-multi-items").find(".w2ui-list-remove").show()},1),0${query(this.el).attr("placeholder")}`)),l.off(".w2item").on("scroll.w2item",e=>{e=this.trigger("scroll",{target:this.el,originalEvent:e});!0!==e.isCancelled&&(w2tooltip.hide(this.el.id+"_preview"),e.finish())}).find(".li-item").on("click.w2item",e=>{var i=query(e.target).closest(".li-item"),s=i.attr("index"),l=this.selected[s];if(!query(i).hasClass("li-search")){e.stopPropagation();let t;if(query(e.target).hasClass("w2ui-list-remove"))query(this.el).prop("readonly")||query(this.el).prop("disabled")||!0!==(t=this.trigger("remove",{target:this.el,originalEvent:e,item:l})).isCancelled&&(this.selected.splice(s,1),query(this.el).trigger("input").trigger("change"),query(e.target).remove());else if(!0!==(t=this.trigger("click",{target:this.el,originalEvent:e.originalEvent,item:l})).isCancelled){let e=l.tooltip;if("file"===this.type&&(/image/i.test(l.type)&&(e=` -
      - -
      `),e+=` -
      -
      ${w2utils.lang("Name")}:
      -
      ${l.name}
      -
      ${w2utils.lang("Size")}:
      -
      ${w2utils.formatSize(l.size)}
      -
      ${w2utils.lang("Type")}:
      -
      ${l.type}
      -
      ${w2utils.lang("Modified")}:
      -
      ${w2utils.date(l.modified)}
      -
      `),e){let t=this.el.id+"_preview";w2tooltip.show({name:t,anchor:i.get(0),html:e,hideOn:["doc-click"],class:""}).show(e=>{query(`#w2overlay-${t} img`).on("load",function(e){var t=this.clientWidth,i=this.clientHeight;t<300&i<300||(i<=t&&300{var t=query(e.target).closest(".li-item");query(t).hasClass("li-search")||(t=this.selected[query(e.target).attr("index")],!0!==(e=this.trigger("mouseEnter",{target:this.el,originalEvent:e,item:t})).isCancelled&&e.finish())}).on("mouseleave.w2item",e=>{var t=query(e.target).closest(".li-item");query(t).hasClass("li-search")||(t=this.selected[query(e.target).attr("index")],!0!==(e=this.trigger("mouseLeave",{target:this.el,originalEvent:e,item:t})).isCancelled&&e.finish())}),"enum"===this.type?this.helpers.multi.find("input").css({width:"15px"}):this.helpers.multi.find(".li-search").hide(),this.resize()}return Date.now()-e}resize(){var e=this.el.clientWidth,t=getComputedStyle(this.el),i=this.helpers.search,s=this.helpers.multi,l=this.helpers.suffix,r=this.helpers.prefix,i=(i&&query(i).css("width",e),s&&query(s).css("width",e-parseInt(t["margin-left"],10)-parseInt(t["margin-right"],10)),l&&this.addSuffix(),r&&this.addPrefix(),this.helpers.multi);if(["enum","file"].includes(this.type)&&i){query(this.el).css("height","auto");let e=query(i).find(":scope div.w2ui-multi-items").get(0).clientHeight+5;(e=(e=e<20?20:e)>this.tmp["max-height"]?this.tmp["max-height"]:e)e&&(e=s),query(i).css({height:e+"px",overflow:e==this.tmp["max-height"]?"auto":"hidden"}),query(i).css("height",e+"px"),query(this.el).css({height:e+"px"})}this.tmp.current_width=e}reset(){null!=this.tmp&&(query(this.el).css("height","auto"),Array("padding-left","padding-right","background-color","border-color").forEach(e=>{this.tmp&&null!=this.tmp["old-"+e]&&(query(this.el).css(e,this.tmp["old-"+e]),delete this.tmp["old-"+e])}),clearInterval(this.tmp.sizeTimer)),query(this.el).val(this.clean(query(this.el).val())).removeClass("w2field").removeData("selected selectedIndex").off(".w2field"),Object.keys(this.helpers).forEach(e=>{query(this.helpers[e]).remove()}),this.helpers={}}clean(e){var t;return"number"!=typeof e&&(t=this.options,e=String(e).trim(),["int","float","money","currency","percent"].includes(this.type)&&("string"==typeof e&&(t.autoFormat&&(["money","currency"].includes(this.type)&&(e=String(e).replace(t.moneyRE,"")),"percent"===this.type&&(e=String(e).replace(t.percentRE,"")),["int","float"].includes(this.type)&&(e=String(e).replace(t.numberRE,""))),e=e.replace(/\s+/g,"").replace(new RegExp(t.groupSymbol,"g"),"").replace(t.decimalSymbol,".")),e=""!==e&&w2utils.isFloat(e)?Number(e):"")),e}format(e){var t=this.options;if(t.autoFormat&&""!==e){switch(this.type){case"money":case"currency":""!==(e=w2utils.formatNumber(e,t.currencyPrecision,!0))&&(e=t.currencyPrefix+e+t.currencySuffix);break;case"percent":""!==(e=w2utils.formatNumber(e,t.precision,!0))&&(e+="%");break;case"float":e=w2utils.formatNumber(e,t.precision,!0);break;case"int":e=w2utils.formatNumber(e,0,!0)}var i=parseInt(1e3).toLocaleString(w2utils.settings.locale,{useGrouping:!0}).slice(1,2);i!==this.options.groupSymbol&&(e=e.replaceAll(i,this.options.groupSymbol))}return e}change(e){if(-1!==["int","float","money","currency","percent"].indexOf(this.type)){var t=query(this.el).val(),i=this.format(this.clean(query(this.el).val()));if(""!==t&&t!=i)return query(this.el).val(i),e.stopPropagation(),e.preventDefault(),!1}if("color"===this.type){let 
e=query(this.el).val();"rgb"!==e.substr(0,3).toLowerCase()&&(e="#"+e,8!==(t=query(this.el).val().length)&&6!==t&&3!==t&&(e=""));i=query(this.el).get(0).nextElementSibling;query(i).find("div").css("background-color",e),query(this.el).hasClass("has-focus")&&this.updateOverlay()}if(-1!==["list","enum","file"].indexOf(this.type)&&this.refresh(),-1!==["date","time","datetime"].indexOf(this.type)){let e=parseInt(this.el.value);w2utils.isInt(this.el.value)&&3e3{this.updateOverlay()},100)}var t;"file"==this.type&&(t=query(this.el).get(0).previousElementSibling,query(t).addClass("has-focus")),query(this.el).addClass("has-focus")}}blur(e){var i,s=query(this.el).val().trim();if(query(this.el).removeClass("has-focus"),["int","float","money","currency","percent"].includes(this.type)&&""!==s){let e=s,t="";this.isStrValid(s)?(i=this.clean(s),null!=this.options.min&&i= "+this.options.min),null!=this.options.max&&i>this.options.max&&(e=this.options.max,t="Should be <= "+this.options.max)):e="",this.options.autoCorrect&&(query(this.el).val(e).trigger("input").trigger("change"),t&&(w2tooltip.show({name:this.el.id+"_error",anchor:this.el,html:t}),setTimeout(()=>{w2tooltip.hide(this.el.id+"_error")},3e3)))}["date","time","datetime"].includes(this.type)&&this.options.autoCorrect&&""!==s&&(i="date"==this.type?w2utils.isDate:"time"==this.type?w2utils.isTime:w2utils.isDateTime,w2date.inRange(this.el.value,this.options)&&i.bind(w2utils)(this.el.value,this.options.format)||query(this.el).val("").trigger("input").trigger("change")),"enum"===this.type&&query(this.helpers.multi).find("input").val("").css("width","15px"),"file"==this.type&&(s=this.el.previousElementSibling,query(s).removeClass("has-focus")),"list"===this.type&&(this.el.value=this.selected?.text??"")}keyDown(t,i){var e,s=this.options,i=t.keyCode||i&&i.keyCode;let l=!1,r,n,a,o,h,d;if(["int","float","money","currency","percent","hex","bin","color","alphanumeric"].includes(this.type)&&!(t.metaKey||t.ctrlKey||t.altKey||this.isStrValid(t.key??"1",!0)||[9,8,13,27,37,38,39,40,46].includes(t.keyCode)))return t.preventDefault(),t.stopPropagation?t.stopPropagation():t.cancelBubble=!0,!1;if(["int","float","money","currency","percent"].includes(this.type)){if(!s.keyboard||query(this.el).prop("readonly")||query(this.el).prop("disabled"))return;switch(r=parseFloat(query(this.el).val().replace(s.moneyRE,""))||0,n=s.step,(t.ctrlKey||t.metaKey)&&(n=10*s.step),i){case 38:t.shiftKey||(h=r+n<=s.max||null==s.max?Number((r+n).toFixed(12)):s.max,query(this.el).val(h).trigger("input").trigger("change"),l=!0);break;case 40:t.shiftKey||(h=r-n>=s.min||null==s.min?Number((r-n).toFixed(12)):s.min,query(this.el).val(h).trigger("input").trigger("change"),l=!0)}l&&(t.preventDefault(),this.moveCaret2end())}if(["date","datetime"].includes(this.type)){if(!s.keyboard||query(this.el).prop("readonly")||query(this.el).prop("disabled"))return;var u=("date"==this.type?w2utils.isDate:w2utils.isDateTime).bind(w2utils),c=("date"==this.type?w2utils.formatDate:w2utils.formatDateTime).bind(w2utils);switch(a=864e5,n=1,(t.ctrlKey||t.metaKey)&&(n=10),(o=u(query(this.el).val(),s.format,!0))||(o=new Date,a=0),i){case 38:t.shiftKey||(10==n?o.setMonth(o.getMonth()+1):o.setTime(o.getTime()+a),d=c(o.getTime(),s.format),query(this.el).val(d).trigger("input").trigger("change"),l=!0);break;case 
40:t.shiftKey||(10==n?o.setMonth(o.getMonth()-1):o.setTime(o.getTime()-a),d=c(o.getTime(),s.format),query(this.el).val(d).trigger("input").trigger("change"),l=!0)}l&&(t.preventDefault(),this.moveCaret2end(),this.updateOverlay())}if("time"===this.type){if(!s.keyboard||query(this.el).prop("readonly")||query(this.el).prop("disabled"))return;n=t.ctrlKey||t.metaKey?60:1,r=query(this.el).val();let e=w2date.str2min(r)||w2date.str2min((new Date).getHours()+":"+((new Date).getMinutes()-1));switch(i){case 38:t.shiftKey||(e+=n,l=!0);break;case 40:t.shiftKey||(e-=n,l=!0)}l&&(t.preventDefault(),query(this.el).val(w2date.min2str(e)).trigger("input").trigger("change"),this.moveCaret2end())}if(["list","enum"].includes(this.type))switch(i){case 8:case 46:"list"==this.type?""==query(this.helpers.search_focus).val()&&(this.selected=null,w2menu.hide(this.el.id+"_menu"),query(this.el).val("").trigger("input").trigger("change")):""==query(this.helpers.multi).find("input").val()&&(w2menu.hide(this.el.id+"_menu"),this.selected.pop(),(e=w2menu.get(this.el.id+"_menu"))&&(e.options.selected=this.selected),this.refresh());break;case 9:case 16:break;case 27:w2menu.hide(this.el.id+"_menu"),this.refresh()}}keyUp(t){if("list"==this.type){let e=query(this.helpers.search_focus);""!==e.val()?query(this.el).attr("placeholder",""):query(this.el).attr("placeholder",this.tmp.pholder),13==t.keyCode?setTimeout(()=>{e.val(""),w2menu.hide(this.el.id+"_menu"),this.refresh()},1):[8,9,16,27,46].includes(t.keyCode)?w2menu.hide(this.el.id+"_menu"):this.updateOverlay(),this.refresh()}var e;"combo"==this.type&&this.updateOverlay(),"enum"==this.type&&(t=this.helpers.multi.find("input"),e=getComputedStyle(t.get(0)),e=w2utils.getStrWidth(t.val(),`font-family: ${e["font-family"]}; font-size: ${e["font-size"]};`),t.css({width:e+15+"px"}),this.resize())}findItemIndex(e,i,s){let l=[];return s=s||[],e.forEach((e,t)=>{e.id===i&&(l=s.concat([t]),this.options.index=[t]),0==l.length&&e.items&&0{e=e.detail.color;query(this.el).val(e).trigger("input").trigger("change")}).liveUpdate(e=>{e=e.detail.color;query(this.helpers.suffix).find(":scope > div").css("background-color","#"+e)})}if(["list","combo","enum"].includes(this.type)){var t;this.el;let s=this.el;if("enum"===this.type&&(t=this.helpers.multi.get(0),s=query(t).find("input").get(0)),"list"===this.type&&(t=this.selected,w2utils.isPlainObject(t)&&0{var t,i;["list","combo"].includes(this.type)?(this.selected=e.detail.item,query(s).val(""),query(this.el).val(this.selected.text).trigger("input").trigger("change"),this.focus({showMenu:!1})):(i=this.selected,(t=e.detail?.item)&&!0!==(e=this.trigger("add",{target:this.el,item:t,originalEvent:e})).isCancelled&&(i.length>=l.max&&0{e=e.detail.date;null!=e&&query(this.el).val(e).trigger("input").trigger("change")})}isStrValid(e,t){let i=!0;switch(this.type){case"int":i=!(!t||!["-",this.options.groupSymbol].includes(e))||w2utils.isInt(e.replace(this.options.numberRE,""));break;case"percent":e=e.replace(/%/g,"");case"float":i=!(!t||!["-","",this.options.decimalSymbol,this.options.groupSymbol].includes(e))||w2utils.isFloat(e.replace(this.options.numberRE,""));break;case"money":case"currency":i=!(!t||!["-",this.options.decimalSymbol,this.options.groupSymbol,this.options.currencyPrefix,this.options.currencySuffix].includes(e))||w2utils.isFloat(e.replace(this.options.moneyRE,""));break;case"bin":i=w2utils.isBin(e);break;case"color":case"hex":i=w2utils.isHex(e);break;case"alphanumeric":i=w2utils.isAlphaNumeric(e)}return i}addPrefix(){var 
e,t;this.options.prefix&&(t=getComputedStyle(this.el),null==this.tmp["old-padding-left"]&&(this.tmp["old-padding-left"]=t["padding-left"]),this.helpers.prefix&&query(this.helpers.prefix).remove(),query(this.el).before(`
      ${this.options.prefix}
      `),e=query(this.el).get(0).previousElementSibling,query(e).css({color:t.color,"font-family":t["font-family"],"font-size":t["font-size"],height:this.el.clientHeight+"px","padding-top":t["padding-top"],"padding-bottom":t["padding-bottom"],"padding-left":this.tmp["old-padding-left"],"padding-right":0,"margin-top":parseInt(t["margin-top"],10)+2+"px","margin-bottom":parseInt(t["margin-bottom"],10)+1+"px","margin-left":t["margin-left"],"margin-right":0,"z-index":1}),query(this.el).css("padding-left",e.clientWidth+"px !important"),this.helpers.prefix=e)}addSuffix(){if(this.options.prefix||this.options.arrow){let e,t=this;var i=getComputedStyle(this.el),s=(null==this.tmp["old-padding-right"]&&(this.tmp["old-padding-right"]=i["padding-right"]),parseInt(i["padding-right"]||0));this.options.arrow&&(this.helpers.arrow&&query(this.helpers.arrow).remove(),query(this.el).after('
       
      '),e=query(this.el).get(0).nextElementSibling,query(e).css({color:i.color,"font-family":i["font-family"],"font-size":i["font-size"],height:this.el.clientHeight+"px",padding:0,"margin-top":parseInt(i["margin-top"],10)+1+"px","margin-bottom":0,"border-left":"1px solid silver",width:"16px",transform:"translateX(-100%)"}).on("mousedown",function(e){query(e.target).hasClass("arrow-up")&&t.keyDown(e,{keyCode:38}),query(e.target).hasClass("arrow-down")&&t.keyDown(e,{keyCode:40})}),s+=e.clientWidth,query(this.el).css("padding-right",s+"px !important"),this.helpers.arrow=e),""!==this.options.suffix&&(this.helpers.suffix&&query(this.helpers.suffix).remove(),query(this.el).after(`
      ${this.options.suffix}
      `),e=query(this.el).get(0).nextElementSibling,query(e).css({color:i.color,"font-family":i["font-family"],"font-size":i["font-size"],height:this.el.clientHeight+"px","padding-top":i["padding-top"],"padding-bottom":i["padding-bottom"],"padding-left":0,"padding-right":i["padding-right"],"margin-top":parseInt(i["margin-top"],10)+2+"px","margin-bottom":parseInt(i["margin-bottom"],10)+1+"px",transform:"translateX(-100%)"}),query(this.el).css("padding-right",e.clientWidth+"px !important"),this.helpers.suffix=e)}}addSearch(){if("list"===this.type){this.helpers.search&&query(this.helpers.search).remove();let e=parseInt(query(this.el).attr("tabIndex")),t=(isNaN(e)||-1===e||(this.tmp["old-tabIndex"]=e),null!=(e=this.tmp["old-tabIndex"]?this.tmp["old-tabIndex"]:e)&&!isNaN(e)||(e=0),"");var i=` -
      - - -
      `,i=(query(this.el).attr("tabindex",-1).before(i),query(this.el).get(0).previousElementSibling),s=(this.helpers.search=i,this.helpers.search_focus=query(i).find("input").get(0),getComputedStyle(this.el));query(i).css({width:this.el.clientWidth+"px","margin-top":s["margin-top"],"margin-left":s["margin-left"],"margin-bottom":s["margin-bottom"],"margin-right":s["margin-right"]}).find("input").css({cursor:"default",width:"100%",opacity:1,padding:s.padding,margin:s.margin,border:"1px solid transparent","background-color":"transparent"}),query(i).find("input").off(".helper").on("focus.helper",e=>{query(e.target).val(""),this.tmp.pholder=query(this.el).attr("placeholder")??"",this.focus(e),e.stopPropagation()}).on("blur.helper",e=>{query(e.target).val(""),null!=this.tmp.pholder&&query(this.el).attr("placeholder",this.tmp.pholder),this.blur(e),e.stopPropagation()}).on("keydown.helper",e=>{this.keyDown(e)}).on("keyup.helper",e=>{this.keyUp(e)}),query(i).on("click",e=>{query(e.target).find("input").focus()})}}addMultiSearch(){if(["enum","file"].includes(this.type)){query(this.helpers.multi).remove();let e="";var l,r,n=getComputedStyle(this.el),a=w2utils.stripSpaces(` - margin-top: 0px; - margin-bottom: 0px; - margin-left: ${n["margin-left"]}; - margin-right: ${n["margin-right"]}; - width: ${w2utils.getSize(this.el,"width")-parseInt(n["margin-left"],10)-parseInt(n["margin-right"],10)}px; - `);null==this.tmp["min-height"]&&(l=this.tmp["min-height"]=parseInt(("none"!=n["min-height"]?n["min-height"]:0)||0),r=parseInt(n.height),this.tmp["min-height"]=Math.max(l,r)),null==this.tmp["max-height"]&&"none"!=n["max-height"]&&(this.tmp["max-height"]=parseInt(n["max-height"]));let t="",i=(null!=query(this.el).attr("id")&&(t=`id="${query(this.el).attr("id")}_search"`),parseInt(query(this.el).attr("tabIndex"))),s=(isNaN(i)||-1===i||(this.tmp["old-tabIndex"]=i),null!=(i=this.tmp["old-tabIndex"]?this.tmp["old-tabIndex"]:i)&&!isNaN(i)||(i=0),"enum"===this.type&&(e=` -
      -
      - -
      -
      `),"file"===this.type&&(e=` -
      -
      - -
      -
      - -
      -
      `),this.tmp["old-background-color"]=n["background-color"],this.tmp["old-border-color"]=n["border-color"],query(this.el).before(e).css({"border-color":"transparent","background-color":"transparent"}),query(this.el.previousElementSibling));this.helpers.multi=s,query(this.el).attr("tabindex",-1),s.on("click",e=>{this.focus(e)}),s.find("input:not(.file-input)").on("click",e=>{this.click(e)}).on("focus",e=>{this.focus(e)}).on("blur",e=>{this.blur(e)}).on("keydown",e=>{this.keyDown(e)}).on("keyup",e=>{this.keyUp(e)}),"file"===this.type&&s.find("input.file-input").off(".drag").on("click.drag",e=>{e.stopPropagation(),query(this.el).prop("readonly")||query(this.el).prop("disabled")||this.focus(e)}).on("dragenter.drag",e=>{query(this.el).prop("readonly")||query(this.el).prop("disabled")||s.addClass("w2ui-file-dragover")}).on("dragleave.drag",e=>{query(this.el).prop("readonly")||query(this.el).prop("disabled")||s.removeClass("w2ui-file-dragover")}).on("drop.drag",e=>{query(this.el).prop("readonly")||query(this.el).prop("disabled")||(s.removeClass("w2ui-file-dragover"),Array.from(e.dataTransfer.files).forEach(e=>{this.addFile(e)}),this.focus(e),e.preventDefault(),e.stopPropagation())}).on("dragover.drag",e=>{e.preventDefault(),e.stopPropagation()}).on("change.drag",e=>{void 0!==e.target.files&&Array.from(e.target.files).forEach(e=>{this.addFile(e)}),this.focus(e)}),this.refresh()}}addFile(t){var e=this.options,s=this.selected;let l={name:t.name,type:t.type,modified:t.lastModifiedDate,size:t.size,content:null,file:t},i=0,r=0,n=[],a=(Array.isArray(s)&&s.forEach(e=>{e.name==t.name&&e.size==t.size&&n.push(w2utils.lang('The file "${name}" (${size}) is already added.',{name:t.name,size:w2utils.formatSize(t.size)})),i+=e.size,r++}),0!==e.maxFileSize&&l.size>e.maxFileSize&&n.push(w2utils.lang("Maximum file size is ${size}",{size:w2utils.formatSize(e.maxFileSize)})),0!==e.maxSize&&i+l.size>e.maxSize&&n.push(w2utils.lang("Maximum total size is ${size}",{size:w2utils.formatSize(e.maxSize)})),0!==e.max&&r>=e.max&&n.push(w2utils.lang("Maximum number of files is ${count}",{count:e.max})),this.trigger("add",{target:this.el,file:l,total:r,totalSize:i,errors:n}));if(!0!==a.isCancelled)if(!0!==e.silent&&0")),console.log("ERRORS (while adding files): ",n);else if(s.push(l),"undefined"!=typeof FileReader&&!0===e.readContent){s=new FileReader;let i=this;s.onload=function(e){var e=e.target.result,t=e.indexOf(",");l.content=e.substr(t+1),i.refresh(),query(i.el).trigger("input").trigger("change"),a.finish()},s.readAsDataURL(t)}else this.refresh(),query(this.el).trigger("input").trigger("change"),a.finish()}moveCaret2end(){setTimeout(()=>{this.el.setSelectionRange(this.el.value.length,this.el.value.length)},0)}}!function(r){function e(){var t,i;t=window,i={w2ui:w2ui,w2utils:w2utils,query:query,w2locale:w2locale,w2event:w2event,w2base:w2base,w2popup:w2popup,w2alert:w2alert,w2confirm:w2confirm,w2prompt:w2prompt,Dialog:Dialog,w2tooltip:w2tooltip,w2menu:w2menu,w2color:w2color,w2date:w2date,Tooltip:Tooltip,w2toolbar:w2toolbar,w2sidebar:w2sidebar,w2tabs:w2tabs,w2layout:w2layout,w2grid:w2grid,w2form:w2form,w2field:w2field},Object.keys(i).forEach(e=>{t[e]=i[e]})}var t=String(void 0).split("?")[1]||"";function i(t,i){var e;if(r.isPlainObject(t)){let e;return"w2form"==i&&(e=new w2form(t),0{let i=r(t).data("w2field");return i,(i=new w2field(s,l)).render(t),i})},r.fn.w2form=function(e){return i.call(this,e,"w2form")},r.fn.w2grid=function(e){return i.call(this,e,"w2grid")},r.fn.w2layout=function(e){return 
i.call(this,e,"w2layout")},r.fn.w2sidebar=function(e){return i.call(this,e,"w2sidebar")},r.fn.w2tabs=function(e){return i.call(this,e,"w2tabs")},r.fn.w2toolbar=function(e){return i.call(this,e,"w2toolbar")},r.fn.w2popup=function(e){0{w2utils.marker(t,i)})},r.fn.w2tag=function(i,s){return this.each((e,t)=>{null==i&&null==s?w2tooltip.hide():("object"==typeof i?s=i:(s=s??{}).html=i,w2tooltip.show(t,s))})},r.fn.w2overlay=function(i,s){return this.each((e,t)=>{null==i&&null==s?w2tooltip.hide():("object"==typeof i?s=i:s.html=i,Object.assign(s,{class:"w2ui-white",hideOn:["doc-click"]}),w2tooltip.show(t,s))})},r.fn.w2menu=function(i,s){return this.each((e,t)=>{"object"==typeof i&&(s=i),"object"==typeof i?s=i:s.items=i,w2menu.show(t,s)})},r.fn.w2color=function(i,s){return this.each((e,t)=>{t=w2color.show(t,i);"function"==typeof s&&t.select(s)})})}(window.jQuery),function(t,i){if("function"==typeof define&&define.amd)return define(()=>i);if("undefined"!=typeof exports){if("undefined"!=typeof module&&module.exports)return exports=module.exports=i;t=exports}t&&Object.keys(i).forEach(e=>{t[e]=i[e]})}(self,{w2ui:w2ui,w2utils:w2utils,query:query,w2locale:w2locale,w2event:w2event,w2base:w2base,w2popup:w2popup,w2alert:w2alert,w2confirm:w2confirm,w2prompt:w2prompt,Dialog:Dialog,w2tooltip:w2tooltip,w2menu:w2menu,w2color:w2color,w2date:w2date,Tooltip:Tooltip,w2toolbar:w2toolbar,w2sidebar:w2sidebar,w2tabs:w2tabs,w2layout:w2layout,w2grid:w2grid,w2form:w2form,w2field:w2field}); \ No newline at end of file diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/proposal_generator/__init__.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/proposal_generator/__init__.py deleted file mode 100644 index 3f4e4df7645c67b7a013295207b98fe70b2e574c..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/proposal_generator/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator -from .rpn import RPN_HEAD_REGISTRY, build_rpn_head, RPN, StandardRPNHead - -__all__ = list(globals().keys()) diff --git a/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/aiofiles/tempfile/temptypes.py b/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/aiofiles/tempfile/temptypes.py deleted file mode 100644 index dccee6ce53c9ee4aa134d0f9d6a76af3f9846099..0000000000000000000000000000000000000000 --- a/spaces/camilosegura/traductor-multilenguaje/Lib/site-packages/aiofiles/tempfile/temptypes.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Async wrappers for spooled temp files and temp directory objects""" -from functools import partial - -from ..base import AsyncBase -from ..threadpool.utils import ( - cond_delegate_to_executor, - delegate_to_executor, - proxy_property_directly, -) - - -@delegate_to_executor("fileno", "rollover") -@cond_delegate_to_executor( - "close", - "flush", - "isatty", - "read", - "readline", - "readlines", - "seek", - "tell", - "truncate", -) -@proxy_property_directly("closed", "encoding", "mode", "name", "newlines") -class AsyncSpooledTemporaryFile(AsyncBase): - """Async wrapper for SpooledTemporaryFile class""" - - async def _check(self): - if self._file._rolled: - return - max_size = self._file._max_size - if max_size and self._file.tell() > max_size: - await self.rollover() - - async def write(self, s): - """Implementation to anticipate rollover""" - if self._file._rolled: - cb = partial(self._file.write, s) - return await self._loop.run_in_executor(self._executor, cb) - else: - file = self._file._file # reference underlying base IO object - rv = file.write(s) - await self._check() - return rv - - async def writelines(self, iterable): - """Implementation to anticipate rollover""" - if self._file._rolled: - cb = partial(self._file.writelines, iterable) - return await self._loop.run_in_executor(self._executor, cb) - else: - file = self._file._file # reference underlying base IO object - rv = file.writelines(iterable) - await self._check() - return rv - - -@delegate_to_executor("cleanup") -@proxy_property_directly("name") -class AsyncTemporaryDirectory: - """Async wrapper for TemporaryDirectory class""" - - def __init__(self, file, loop, executor): - self._file = file - self._loop = loop - self._executor = executor - - async def close(self): - await self.cleanup() diff --git a/spaces/catontheturntable/Ghibli-Diffusion/app.py b/spaces/catontheturntable/Ghibli-Diffusion/app.py deleted file mode 100644 index 25e4911d6481344a01f0ab7867dabd1f3d130e7a..0000000000000000000000000000000000000000 --- a/spaces/catontheturntable/Ghibli-Diffusion/app.py +++ /dev/null @@ -1,10 +0,0 @@ -import gradio as gr - -description = """
      - -
      -

      Ghibli Diffusion -This is the fine-tuned Stable Diffusion model trained on images from modern anime feature films from Studio Ghibli. Use the tokens ghibli style in your prompts for the effect.

      - """ - -gr.Interface.load("models/nitrosocke/Ghibli-Diffusion", description=description, examples=[["superman ghibli style"]]).launch() diff --git a/spaces/celise88/Pathfinder/templates/job_neighborhoods.html b/spaces/celise88/Pathfinder/templates/job_neighborhoods.html deleted file mode 100644 index d5796bedbd2d2684af30a0a5fe73f5bee0e5d0af..0000000000000000000000000000000000000000 --- a/spaces/celise88/Pathfinder/templates/job_neighborhoods.html +++ /dev/null @@ -1,14 +0,0 @@ - - - -
      -
      - - \ No newline at end of file diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/implementations/smb.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/implementations/smb.py deleted file mode 100644 index 9892f469d563fec7041a2abc68416a19fd96888c..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/implementations/smb.py +++ /dev/null @@ -1,309 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module contains SMBFileSystem class responsible for handling access to -Windows Samba network shares by using package smbprotocol -""" - -import datetime -import uuid -from stat import S_ISDIR, S_ISLNK - -import smbclient - -from .. import AbstractFileSystem -from ..utils import infer_storage_options - -# ! pylint: disable=bad-continuation - - -class SMBFileSystem(AbstractFileSystem): - """Allow reading and writing to Windows and Samba network shares. - - When using `fsspec.open()` for getting a file-like object the URI - should be specified as this format: - ``smb://workgroup;user:password@server:port/share/folder/file.csv``. - - Example:: - - >>> import fsspec - >>> with fsspec.open( - ... 'smb://myuser:mypassword@myserver.com/' 'share/folder/file.csv' - ... ) as smbfile: - ... df = pd.read_csv(smbfile, sep='|', header=None) - - Note that you need to pass in a valid hostname or IP address for the host - component of the URL. Do not use the Windows/NetBIOS machine name for the - host component. - - The first component of the path in the URL points to the name of the shared - folder. Subsequent path components will point to the directory/folder/file. - - The URL components ``workgroup`` , ``user``, ``password`` and ``port`` may be - optional. - - .. note:: - - For working this source require `smbprotocol`_ to be installed, e.g.:: - - $ pip install smbprotocol - # or - # pip install smbprotocol[kerberos] - - .. _smbprotocol: https://github.com/jborean93/smbprotocol#requirements - - Note: if using this with the ``open`` or ``open_files``, with full URLs, - there is no way to tell if a path is relative, so all paths are assumed - to be absolute. - """ - - protocol = "smb" - - # pylint: disable=too-many-arguments - def __init__( - self, - host, - port=None, - username=None, - password=None, - timeout=60, - encrypt=None, - share_access=None, - **kwargs, - ): - """ - You can use _get_kwargs_from_urls to get some kwargs from - a reasonable SMB url. - - Authentication will be anonymous or integrated if username/password are not - given. - - Parameters - ---------- - host: str - The remote server name/ip to connect to - port: int - Port to connect with. Usually 445, sometimes 139. - username: str or None - Username to connect with. Required if Kerberos auth is not being used. - password: str or None - User's password on the server, if using username - timeout: int - Connection timeout in seconds - encrypt: bool - Whether to force encryption or not, once this has been set to True - the session cannot be changed back to False. - share_access: str or None - Specifies the default access applied to file open operations - performed with this file system object. - This affects whether other processes can concurrently open a handle - to the same file. - - - None (the default): exclusively locks the file until closed. - - 'r': Allow other handles to be opened with read access. - - 'w': Allow other handles to be opened with write access. 
- - 'd': Allow other handles to be opened with delete access. - """ - super(SMBFileSystem, self).__init__(**kwargs) - self.host = host - self.port = port - self.username = username - self.password = password - self.timeout = timeout - self.encrypt = encrypt - self.temppath = kwargs.pop("temppath", "") - self.share_access = share_access - self._connect() - - def _connect(self): - smbclient.register_session( - self.host, - username=self.username, - password=self.password, - port=445 if self.port is None else self.port, - encrypt=self.encrypt, - connection_timeout=self.timeout, - ) - - @classmethod - def _strip_protocol(cls, path): - return infer_storage_options(path)["path"] - - @staticmethod - def _get_kwargs_from_urls(path): - # smb://workgroup;user:password@host:port/share/folder/file.csv - out = infer_storage_options(path) - out.pop("path", None) - out.pop("protocol", None) - return out - - def mkdir(self, path, create_parents=True, **kwargs): - wpath = _as_unc_path(self.host, path) - if create_parents: - smbclient.makedirs(wpath, exist_ok=False, **kwargs) - else: - smbclient.mkdir(wpath, **kwargs) - - def makedirs(self, path, exist_ok=False): - if _share_has_path(path): - wpath = _as_unc_path(self.host, path) - smbclient.makedirs(wpath, exist_ok=exist_ok) - - def rmdir(self, path): - if _share_has_path(path): - wpath = _as_unc_path(self.host, path) - smbclient.rmdir(wpath) - - def info(self, path, **kwargs): - wpath = _as_unc_path(self.host, path) - stats = smbclient.stat(wpath, **kwargs) - if S_ISDIR(stats.st_mode): - stype = "directory" - elif S_ISLNK(stats.st_mode): - stype = "link" - else: - stype = "file" - res = { - "name": path + "/" if stype == "directory" else path, - "size": stats.st_size, - "type": stype, - "uid": stats.st_uid, - "gid": stats.st_gid, - "time": stats.st_atime, - "mtime": stats.st_mtime, - } - return res - - def created(self, path): - """Return the created timestamp of a file as a datetime.datetime""" - wpath = _as_unc_path(self.host, path) - stats = smbclient.stat(wpath) - return datetime.datetime.utcfromtimestamp(stats.st_ctime) - - def modified(self, path): - """Return the modified timestamp of a file as a datetime.datetime""" - wpath = _as_unc_path(self.host, path) - stats = smbclient.stat(wpath) - return datetime.datetime.utcfromtimestamp(stats.st_mtime) - - def ls(self, path, detail=True, **kwargs): - unc = _as_unc_path(self.host, path) - listed = smbclient.listdir(unc, **kwargs) - dirs = ["/".join([path.rstrip("/"), p]) for p in listed] - if detail: - dirs = [self.info(d) for d in dirs] - return dirs - - # pylint: disable=too-many-arguments - def _open( - self, - path, - mode="rb", - block_size=-1, - autocommit=True, - cache_options=None, - **kwargs, - ): - """ - block_size: int or None - If 0, no buffering, 1, line buffering, >1, buffer that many bytes - - Notes - ----- - By specifying 'share_access' in 'kwargs' it is possible to override the - default shared access setting applied in the constructor of this object. 
- """ - bls = block_size if block_size is not None and block_size >= 0 else -1 - wpath = _as_unc_path(self.host, path) - share_access = kwargs.pop("share_access", self.share_access) - if "w" in mode and autocommit is False: - temp = _as_temp_path(self.host, path, self.temppath) - return SMBFileOpener(wpath, temp, mode, block_size=bls, **kwargs) - return smbclient.open_file( - wpath, mode, buffering=bls, share_access=share_access, **kwargs - ) - - def copy(self, path1, path2, **kwargs): - """Copy within two locations in the same filesystem""" - wpath1 = _as_unc_path(self.host, path1) - wpath2 = _as_unc_path(self.host, path2) - smbclient.copyfile(wpath1, wpath2, **kwargs) - - def _rm(self, path): - if _share_has_path(path): - wpath = _as_unc_path(self.host, path) - stats = smbclient.stat(wpath) - if S_ISDIR(stats.st_mode): - smbclient.rmdir(wpath) - else: - smbclient.remove(wpath) - - def mv(self, path1, path2, **kwargs): - wpath1 = _as_unc_path(self.host, path1) - wpath2 = _as_unc_path(self.host, path2) - smbclient.rename(wpath1, wpath2, **kwargs) - - -def _as_unc_path(host, path): - rpath = path.replace("/", "\\") - unc = "\\\\{}{}".format(host, rpath) - return unc - - -def _as_temp_path(host, path, temppath): - share = path.split("/")[1] - temp_file = "/{}{}/{}".format(share, temppath, uuid.uuid4()) - unc = _as_unc_path(host, temp_file) - return unc - - -def _share_has_path(path): - parts = path.count("/") - if path.endswith("/"): - return parts > 2 - return parts > 1 - - -class SMBFileOpener(object): - """writes to remote temporary file, move on commit""" - - def __init__(self, path, temp, mode, block_size=-1, **kwargs): - self.path = path - self.temp = temp - self.mode = mode - self.block_size = block_size - self.kwargs = kwargs - self.smbfile = None - self._incontext = False - self._open() - - def _open(self): - if self.smbfile is None or self.smbfile.closed: - self.smbfile = smbclient.open_file( - self.temp, self.mode, buffering=self.block_size, **self.kwargs - ) - - def commit(self): - """Move temp file to definitive on success.""" - # TODO: use transaction support in SMB protocol - smbclient.replace(self.temp, self.path) - - def discard(self): - """Remove the temp file on failure.""" - smbclient.remove(self.temp) - - def __fspath__(self): - return self.path - - def __iter__(self): - return self.smbfile.__iter__() - - def __getattr__(self, item): - return getattr(self.smbfile, item) - - def __enter__(self): - self._incontext = True - return self.smbfile.__enter__() - - def __exit__(self, exc_type, exc_value, traceback): - self._incontext = False - self.smbfile.__exit__(exc_type, exc_value, traceback) diff --git a/spaces/chuanenlin/foodnet/README.md b/spaces/chuanenlin/foodnet/README.md deleted file mode 100644 index 0305f7062b850cc2a41bcab85e591ffc080be6b1..0000000000000000000000000000000000000000 --- a/spaces/chuanenlin/foodnet/README.md +++ /dev/null @@ -1,130 +0,0 @@ ---- -title: FoodNet -emoji: 🍔 -colorFrom: purple -colorTo: purple -sdk: streamlit -app_file: foodnet.py - ---- - -# 24-679 FoodNet Project - -## Authors - -David Chuan-En Lin: chuanenl@cs.cmu.edu - -Mitch Fogelson: mfogelso@andrew.cmu.edu - -Sunny Yang: yundiy@andrew.cmu.edu - -Shihao Xu: shihaoxu@andrew.cmu.edu - -## TODO - -### Must Have - -1. Cooking method (How to do this?) (TBD) -2. Ingredients -> Recipe (Recipe Querey?) (Mitch) -3. Cuisine Meta Data (Where to get) (TBD) -4. Deployment on the cloud -> (David) - -### Like to have - -1. 
Images related -> - - * [Google Image Search API](https://pypi.org/project/Google-Images-Search/) - * [OpenAI Clip](https://openai.com/api/) - -2. User Studies - -### Moonshot - -1. Recipe Masking Prediction -2. - -## Description - -We wanted to help students and households in the Pittsburgh to reduce their food waste. We developed a model that suggests recipes based on current leftovers availible. - -* Model -> Facebook's [FastText](https://radimrehurek.com/gensim/models/fasttext.html) -* Dataset -> [Simplified 1M+ Recipes](https://github.com/schmidtdominik/RecipeNet) - * [Dominick Schmidt Blog](https://dominikschmidt.xyz/simplified-recipes-1M/#dataset-sources) - -## Try WebApp - -https://huggingface.co/spaces/chuanenlin/foodnet - -## Quick Start - -1. Clone repository - -``` -git clone git@github.com:chuanenlin/foodnet.git -``` - -2. Move into repository - -``` -cd foodnet -``` - -(**Optional** Create conda environment) - -3. Install gdown - -``` -pip install gdown -``` - -4. Download models - -``` -gdown https://drive.google.com/drive/folders/1LlQpd45E71dSfC8FgvIhJjQjqxnlBC9j -O ./models --folder -``` - -5. Download datasets (Optional) - -``` -gdown https://drive.google.com/drive/folders/18aA3BFKqzkqNz5L4N5vN6bFnp8Ch2CQV -O ./data --folder -``` - -6. Install Dependencies - -``` -pip install -r requirements.txt -``` - -7. Run code - -``` -streamlit run foodnet.py -``` - -## Args - -Train new model - -``` -streamlit run foodnet.py -d/--dataset ['/PATH/TO/DATASET'] -t/--train True -``` - -Load alternative model - -``` -streamlit run foodnet.py --model ['/PATH/TO/MODEL'] -``` - -## Requirements - -* python>=3.6 -* gensim>=4.0.x -* streamlit -* gdown -* nltk -* pickle -* matplotlib - -## References - -TODO diff --git a/spaces/cncn102/bingo1/src/pages/api/sydney.ts b/spaces/cncn102/bingo1/src/pages/api/sydney.ts deleted file mode 100644 index 8bd7074bc72bd2803e4acf89d3814908893ff044..0000000000000000000000000000000000000000 --- a/spaces/cncn102/bingo1/src/pages/api/sydney.ts +++ /dev/null @@ -1,66 +0,0 @@ -import { NextApiRequest, NextApiResponse } from 'next' -import { WebSocket, debug } from '@/lib/isomorphic' -import { BingWebBot } from '@/lib/bots/bing' -import { websocketUtils } from '@/lib/bots/bing/utils' -import { WatchDog, createHeaders } from '@/lib/utils' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const conversationContext = req.body - const headers = createHeaders(req.cookies) - const id = headers['x-forwarded-for'] - - debug(id, headers) - res.setHeader('Content-Type', 'text/stream; charset=UTF-8') - - const ws = new WebSocket('wss://sydney.bing.com/sydney/ChatHub', { - headers: { - ...headers, - 'accept-language': 'zh-CN,zh;q=0.9', - 'cache-control': 'no-cache', - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - pragma: 'no-cache', - } - }) - - const closeDog = new WatchDog() - const timeoutDog = new WatchDog() - ws.onmessage = (event) => { - timeoutDog.watch(() => { - debug(id, 'timeout') - ws.send(websocketUtils.packMessage({ type: 6 })) - }, 3000) - closeDog.watch(() => { - debug(id, 'timeout close') - ws.close() - }, 20000) - res.write(event.data) - if (/\{"type":([367])\}/.test(String(event.data))) { - const type = parseInt(RegExp.$1, 10) - debug(id, 'connection type', type) - if (type === 3) { - ws.close() - } else { - ws.send(websocketUtils.packMessage({ type })) - } - } - } - - ws.onclose = () => { - timeoutDog.reset() - closeDog.reset() - debug(id, 'ws close') - res.end() - } 
- - await new Promise((resolve) => ws.onopen = resolve) - ws.send(websocketUtils.packMessage({ protocol: 'json', version: 1 })) - ws.send(websocketUtils.packMessage({ type: 6 })) - ws.send(websocketUtils.packMessage(BingWebBot.buildChatRequest(conversationContext!))) - req.socket.once('close', () => { - debug(id, 'connection close') - ws.close() - if (!res.closed) { - res.end() - } - }) -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cbs_jpeg.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cbs_jpeg.c deleted file mode 100644 index 5921d624a183311cd5952f9912758d6f6b78260a..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/cbs_jpeg.c +++ /dev/null @@ -1,444 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "cbs.h" -#include "cbs_internal.h" -#include "cbs_jpeg.h" - - -#define HEADER(name) do { \ - ff_cbs_trace_header(ctx, name); \ - } while (0) - -#define CHECK(call) do { \ - err = (call); \ - if (err < 0) \ - return err; \ - } while (0) - -#define SUBSCRIPTS(subs, ...) (subs > 0 ? ((int[subs + 1]){ subs, __VA_ARGS__ }) : NULL) - -#define u(width, name, range_min, range_max) \ - xu(width, name, range_min, range_max, 0, ) -#define us(width, name, sub, range_min, range_max) \ - xu(width, name, range_min, range_max, 1, sub) - - -#define READ -#define READWRITE read -#define RWContext GetBitContext -#define FUNC(name) cbs_jpeg_read_ ## name - -#define xu(width, name, range_min, range_max, subs, ...) do { \ - uint32_t value; \ - CHECK(ff_cbs_read_unsigned(ctx, rw, width, #name, \ - SUBSCRIPTS(subs, __VA_ARGS__), \ - &value, range_min, range_max)); \ - current->name = value; \ - } while (0) - -#include "cbs_jpeg_syntax_template.c" - -#undef READ -#undef READWRITE -#undef RWContext -#undef FUNC -#undef xu - -#define WRITE -#define READWRITE write -#define RWContext PutBitContext -#define FUNC(name) cbs_jpeg_write_ ## name - -#define xu(width, name, range_min, range_max, subs, ...) do { \ - uint32_t value = current->name; \ - CHECK(ff_cbs_write_unsigned(ctx, rw, width, #name, \ - SUBSCRIPTS(subs, __VA_ARGS__), \ - value, range_min, range_max)); \ - } while (0) - - -#include "cbs_jpeg_syntax_template.c" - -#undef WRITE -#undef READWRITE -#undef RWContext -#undef FUNC -#undef xu - - -static int cbs_jpeg_split_fragment(CodedBitstreamContext *ctx, - CodedBitstreamFragment *frag, - int header) -{ - AVBufferRef *data_ref; - uint8_t *data; - size_t data_size; - int start, end, marker, next_start, next_marker; - int err, i, j, length; - - if (frag->data_size < 4) { - // Definitely too short to be meaningful. 
- return AVERROR_INVALIDDATA; - } - - for (i = 0; i + 1 < frag->data_size && frag->data[i] != 0xff; i++); - if (i > 0) { - av_log(ctx->log_ctx, AV_LOG_WARNING, "Discarding %d bytes at " - "beginning of image.\n", i); - } - for (++i; i + 1 < frag->data_size && frag->data[i] == 0xff; i++); - if (i + 1 >= frag->data_size && frag->data[i]) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid JPEG image: " - "no SOI marker found.\n"); - return AVERROR_INVALIDDATA; - } - marker = frag->data[i]; - if (marker != JPEG_MARKER_SOI) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid JPEG image: first " - "marker is %02x, should be SOI.\n", marker); - return AVERROR_INVALIDDATA; - } - for (++i; i + 1 < frag->data_size && frag->data[i] == 0xff; i++); - if (i + 1 >= frag->data_size) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid JPEG image: " - "no image content found.\n"); - return AVERROR_INVALIDDATA; - } - marker = frag->data[i]; - start = i + 1; - - do { - if (marker == JPEG_MARKER_EOI) { - break; - } else if (marker == JPEG_MARKER_SOS) { - next_marker = -1; - end = start; - for (i = start; i + 1 < frag->data_size; i++) { - if (frag->data[i] != 0xff) - continue; - end = i; - for (++i; i + 1 < frag->data_size && - frag->data[i] == 0xff; i++); - if (i + 1 < frag->data_size) { - if (frag->data[i] == 0x00) - continue; - next_marker = frag->data[i]; - next_start = i + 1; - } - break; - } - } else { - i = start; - if (i + 2 > frag->data_size) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid JPEG image: " - "truncated at %02x marker.\n", marker); - return AVERROR_INVALIDDATA; - } - length = AV_RB16(frag->data + i); - if (i + length > frag->data_size) { - av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid JPEG image: " - "truncated at %02x marker segment.\n", marker); - return AVERROR_INVALIDDATA; - } - end = start + length; - - i = end; - if (frag->data[i] != 0xff) { - next_marker = -1; - } else { - for (++i; i + 1 < frag->data_size && - frag->data[i] == 0xff; i++); - if (i + 1 >= frag->data_size) { - next_marker = -1; - } else { - next_marker = frag->data[i]; - next_start = i + 1; - } - } - } - - if (marker == JPEG_MARKER_SOS) { - length = AV_RB16(frag->data + start); - - if (length > end - start) - return AVERROR_INVALIDDATA; - - data_ref = NULL; - data = av_malloc(end - start + - AV_INPUT_BUFFER_PADDING_SIZE); - if (!data) - return AVERROR(ENOMEM); - - memcpy(data, frag->data + start, length); - for (i = start + length, j = length; i < end; i++, j++) { - if (frag->data[i] == 0xff) { - while (frag->data[i] == 0xff) - ++i; - data[j] = 0xff; - } else { - data[j] = frag->data[i]; - } - } - data_size = j; - - memset(data + data_size, 0, AV_INPUT_BUFFER_PADDING_SIZE); - - } else { - data = frag->data + start; - data_size = end - start; - data_ref = frag->data_ref; - } - - err = ff_cbs_append_unit_data(frag, marker, - data, data_size, data_ref); - if (err < 0) - return err; - - marker = next_marker; - start = next_start; - } while (next_marker != -1); - - return 0; -} - -static int cbs_jpeg_read_unit(CodedBitstreamContext *ctx, - CodedBitstreamUnit *unit) -{ - GetBitContext gbc; - int err; - - err = init_get_bits(&gbc, unit->data, 8 * unit->data_size); - if (err < 0) - return err; - - err = ff_cbs_alloc_unit_content(ctx, unit); - if (err < 0) - return err; - - if (unit->type >= JPEG_MARKER_SOF0 && - unit->type <= JPEG_MARKER_SOF3) { - err = cbs_jpeg_read_frame_header(ctx, &gbc, unit->content); - if (err < 0) - return err; - - } else if (unit->type >= JPEG_MARKER_APPN && - unit->type <= JPEG_MARKER_APPN + 15) { - err = 
cbs_jpeg_read_application_data(ctx, &gbc, unit->content); - if (err < 0) - return err; - - } else if (unit->type == JPEG_MARKER_SOS) { - JPEGRawScan *scan = unit->content; - int pos; - - err = cbs_jpeg_read_scan_header(ctx, &gbc, &scan->header); - if (err < 0) - return err; - - pos = get_bits_count(&gbc); - av_assert0(pos % 8 == 0); - if (pos > 0) { - scan->data_size = unit->data_size - pos / 8; - scan->data_ref = av_buffer_ref(unit->data_ref); - if (!scan->data_ref) - return AVERROR(ENOMEM); - scan->data = unit->data + pos / 8; - } - - } else { - switch (unit->type) { -#define SEGMENT(marker, func) \ - case JPEG_MARKER_ ## marker: \ - { \ - err = cbs_jpeg_read_ ## func(ctx, &gbc, unit->content); \ - if (err < 0) \ - return err; \ - } \ - break - SEGMENT(DQT, dqt); - SEGMENT(DHT, dht); - SEGMENT(COM, comment); -#undef SEGMENT - default: - return AVERROR(ENOSYS); - } - } - - return 0; -} - -static int cbs_jpeg_write_scan(CodedBitstreamContext *ctx, - CodedBitstreamUnit *unit, - PutBitContext *pbc) -{ - JPEGRawScan *scan = unit->content; - int err; - - err = cbs_jpeg_write_scan_header(ctx, pbc, &scan->header); - if (err < 0) - return err; - - if (scan->data) { - if (scan->data_size * 8 > put_bits_left(pbc)) - return AVERROR(ENOSPC); - - av_assert0(put_bits_count(pbc) % 8 == 0); - - flush_put_bits(pbc); - - memcpy(put_bits_ptr(pbc), scan->data, scan->data_size); - skip_put_bytes(pbc, scan->data_size); - } - - return 0; -} - -static int cbs_jpeg_write_segment(CodedBitstreamContext *ctx, - CodedBitstreamUnit *unit, - PutBitContext *pbc) -{ - int err; - - if (unit->type >= JPEG_MARKER_SOF0 && - unit->type <= JPEG_MARKER_SOF3) { - err = cbs_jpeg_write_frame_header(ctx, pbc, unit->content); - } else if (unit->type >= JPEG_MARKER_APPN && - unit->type <= JPEG_MARKER_APPN + 15) { - err = cbs_jpeg_write_application_data(ctx, pbc, unit->content); - } else { - switch (unit->type) { -#define SEGMENT(marker, func) \ - case JPEG_MARKER_ ## marker: \ - err = cbs_jpeg_write_ ## func(ctx, pbc, unit->content); \ - break; - SEGMENT(DQT, dqt); - SEGMENT(DHT, dht); - SEGMENT(COM, comment); - default: - return AVERROR_PATCHWELCOME; - } - } - - return err; -} - -static int cbs_jpeg_write_unit(CodedBitstreamContext *ctx, - CodedBitstreamUnit *unit, - PutBitContext *pbc) -{ - if (unit->type == JPEG_MARKER_SOS) - return cbs_jpeg_write_scan (ctx, unit, pbc); - else - return cbs_jpeg_write_segment(ctx, unit, pbc); -} - -static int cbs_jpeg_assemble_fragment(CodedBitstreamContext *ctx, - CodedBitstreamFragment *frag) -{ - const CodedBitstreamUnit *unit; - uint8_t *data; - size_t size, dp, sp; - int i; - - size = 4; // SOI + EOI. 
- for (i = 0; i < frag->nb_units; i++) { - unit = &frag->units[i]; - size += 2 + unit->data_size; - if (unit->type == JPEG_MARKER_SOS) { - for (sp = 0; sp < unit->data_size; sp++) { - if (unit->data[sp] == 0xff) - ++size; - } - } - } - - frag->data_ref = av_buffer_alloc(size + AV_INPUT_BUFFER_PADDING_SIZE); - if (!frag->data_ref) - return AVERROR(ENOMEM); - data = frag->data_ref->data; - - dp = 0; - - data[dp++] = 0xff; - data[dp++] = JPEG_MARKER_SOI; - - for (i = 0; i < frag->nb_units; i++) { - unit = &frag->units[i]; - - data[dp++] = 0xff; - data[dp++] = unit->type; - - if (unit->type != JPEG_MARKER_SOS) { - memcpy(data + dp, unit->data, unit->data_size); - dp += unit->data_size; - } else { - sp = AV_RB16(unit->data); - av_assert0(sp <= unit->data_size); - memcpy(data + dp, unit->data, sp); - dp += sp; - - for (; sp < unit->data_size; sp++) { - if (unit->data[sp] == 0xff) { - data[dp++] = 0xff; - data[dp++] = 0x00; - } else { - data[dp++] = unit->data[sp]; - } - } - } - } - - data[dp++] = 0xff; - data[dp++] = JPEG_MARKER_EOI; - - av_assert0(dp == size); - - memset(data + size, 0, AV_INPUT_BUFFER_PADDING_SIZE); - frag->data = data; - frag->data_size = size; - - return 0; -} - -static const CodedBitstreamUnitTypeDescriptor cbs_jpeg_unit_types[] = { - CBS_UNIT_RANGE_POD(JPEG_MARKER_SOF0, JPEG_MARKER_SOF3, JPEGRawFrameHeader), - - CBS_UNIT_RANGE_INTERNAL_REF(JPEG_MARKER_APPN, JPEG_MARKER_APPN + 15, - JPEGRawApplicationData, Ap), - - CBS_UNIT_TYPE_INTERNAL_REF(JPEG_MARKER_SOS, JPEGRawScan, data), - - CBS_UNIT_TYPE_POD(JPEG_MARKER_DQT, JPEGRawQuantisationTableSpecification), - CBS_UNIT_TYPE_POD(JPEG_MARKER_DHT, JPEGRawHuffmanTableSpecification), - - CBS_UNIT_TYPE_INTERNAL_REF(JPEG_MARKER_COM, JPEGRawComment, Cm), - - CBS_UNIT_TYPE_END_OF_LIST -}; - -const CodedBitstreamType ff_cbs_type_jpeg = { - .codec_id = AV_CODEC_ID_MJPEG, - - .unit_types = cbs_jpeg_unit_types, - - .split_fragment = &cbs_jpeg_split_fragment, - .read_unit = &cbs_jpeg_read_unit, - .write_unit = &cbs_jpeg_write_unit, - .assemble_fragment = &cbs_jpeg_assemble_fragment, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/imx_dump_header_bsf.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/imx_dump_header_bsf.c deleted file mode 100644 index 241415a0ef0b1fc946b1f609b7031bf0810325d3..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/imx_dump_header_bsf.c +++ /dev/null @@ -1,76 +0,0 @@ -/* - * imx dump header bitstream filter - * Copyright (c) 2007 Baptiste Coudurier - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * imx dump header bitstream filter - * modifies bitstream to fit in mov and be decoded by final cut pro decoder - */ - -#include "bsf.h" -#include "bsf_internal.h" -#include "bytestream.h" - - -static int imx_dump_header(AVBSFContext *ctx, AVPacket *out) -{ - /* MXF essence element key */ - static const uint8_t imx_header[16] = { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x05,0x01,0x01,0x00 }; - - AVPacket *in; - int ret = 0; - uint8_t *out_buf; - - ret = ff_bsf_get_packet(ctx, &in); - if (ret < 0) - return ret; - - ret = av_new_packet(out, in->size + 20); - if (ret < 0) - goto fail; - - out_buf = out->data; - - bytestream_put_buffer(&out_buf, imx_header, 16); - bytestream_put_byte(&out_buf, 0x83); /* KLV BER long form */ - bytestream_put_be24(&out_buf, in->size); - bytestream_put_buffer(&out_buf, in->data, in->size); - - ret = av_packet_copy_props(out, in); - if (ret < 0) - goto fail; - -fail: - if (ret < 0) - av_packet_unref(out); - av_packet_free(&in); - return ret; -} - -static const enum AVCodecID codec_ids[] = { - AV_CODEC_ID_MPEG2VIDEO, AV_CODEC_ID_NONE, -}; - -const FFBitStreamFilter ff_imx_dump_header_bsf = { - .p.name = "imxdump", - .p.codec_ids = codec_ids, - .filter = imx_dump_header, -}; diff --git a/spaces/congsaPfin/Manga-OCR/logs/Air India Ticket Download Made Simple How to Get Your E-Ticket Online.md b/spaces/congsaPfin/Manga-OCR/logs/Air India Ticket Download Made Simple How to Get Your E-Ticket Online.md deleted file mode 100644 index 9f16eff76e586e27ddcaae294567c0aac5acf768..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Air India Ticket Download Made Simple How to Get Your E-Ticket Online.md +++ /dev/null @@ -1,157 +0,0 @@ -

      How to Download Air India Flight Ticket Online


Are you planning to travel with Air India, the flag carrier of India and one of the largest airlines in the country? If yes, you might be wondering how to download your flight ticket online and what the benefits of doing so are. In this article, we will guide you through the process of booking, downloading, printing, and managing your flight ticket with Air India. We will also tell you about the other services and benefits that Air India offers to its passengers. So, let's get started!


      What is Air India?


      Air India is the national airline of India and a member of Star Alliance, the world's largest airline network. It operates flights to over 100 domestic and international destinations across Asia, Europe, North America, Africa, and Australia. It has a fleet of more than 170 aircraft, including Boeing 787 Dreamliners, Airbus A320neo, and Boeing 777s. It also has a subsidiary called Air India Express, which operates low-cost flights to the Middle East and Southeast Asia.


      Why do you need to download your flight ticket?


      A flight ticket is a document that confirms your reservation and allows you to board the plane. Nowadays, most airlines offer e-tickets, which are electronic versions of flight tickets that you can access on your smartphone or computer. E-tickets have many advantages over paper tickets, such as:

      -
• They are convenient and easy to use. You don't have to worry about losing or forgetting your paper ticket.
• They are eco-friendly and save paper.
• They are secure and can be verified by scanning a barcode or QR code.
• They can be easily modified or canceled online if needed.

      To board an Air India flight, you need to have an e-ticket and a valid photo ID. You can also print a copy of your e-ticket if you prefer, but it is not mandatory.
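By the way, the barcode or QR code on your e-ticket is nothing mysterious: it simply encodes text, such as your booking reference (PNR), so that airport scanners can read it quickly. Purely as an illustrative aside for curious readers, the short Python sketch below shows how any text can be turned into a QR code using the third-party qrcode package; the booking reference in it is a made-up placeholder, not a real Air India PNR.

```python
# Illustrative sketch only: a QR code is just text rendered as a scannable image.
# Requires the third-party package "qrcode" with image support:
#   pip install "qrcode[pil]"
import qrcode

booking_reference = "ABC123"  # made-up placeholder PNR, not a real booking
img = qrcode.make(f"PNR: {booking_reference}")  # encode the text as a QR image
img.save("eticket_qr.png")  # any phone scanner app will read back "PNR: ABC123"
```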


      How to book your flight ticket with Air India?


You can book your flight ticket with Air India in two ways: online or offline. Online booking is faster and more convenient, as you can do it from anywhere, at any time. Offline booking is done through travel agents or Air India offices, which may charge extra fees or commissions.


      To book your flight ticket online with Air India, you can use either their website or their app. Here are the steps to follow:

      -
1. Go to www.airindia.in or download the Air India app from Google Play Store or Apple App Store.
2. Select your origin, destination, dates, number of passengers, travel class, and preferred currency.
3. Click on "Search Flights" and choose the best option for you from the available flights.
4. Enter your personal details, contact information, and passport details (if required).
5. Select any additional services or benefits that you want, such as seat selection, baggage allowance, meals, insurance, etc.
6. Review your booking details and make the payment using your preferred method.
7. Once the payment is successful, you will receive a confirmation email and SMS with your booking reference number and e-ticket number.

      Congratulations, you have booked your flight ticket with Air India!


How to download your flight ticket from the Air India website?


      If you have booked your flight ticket online with Air India, you can easily download it from their website. Here are the steps to follow:

1. Go to www.airindia.in and click on "Manage Your Trip".
2. Enter your booking reference number and last name or email ID and click on "Retrieve Booking".
3. You will see your booking details and e-ticket on the screen. You can also view or change your seat, meal, or baggage preferences.
4. Click on "Download E-Ticket" and save the PDF file on your device.

You have successfully downloaded your flight ticket from the Air India website!
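If you are comfortable with a little scripting, you can also save the e-ticket PDF from a direct link (for example, one sent to your email) without opening a browser. The snippet below is only a rough sketch using Python's requests library; the URL is a placeholder, not a real Air India address, so replace it with the actual link to your e-ticket.

```python
# Rough sketch: download an e-ticket PDF from a direct link and save it locally.
# The URL below is a placeholder for illustration, not a real Air India endpoint.
import requests

ETICKET_URL = "https://example.com/my-eticket.pdf"  # replace with your actual link

response = requests.get(ETICKET_URL, timeout=30)
response.raise_for_status()  # stop with an error if the link did not work

with open("air_india_eticket.pdf", "wb") as f:
    f.write(response.content)

print("Saved air_india_eticket.pdf")
```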


How to download your flight ticket from the Air India app?


      If you have booked your flight ticket online with Air India, you can also download it from their app. Here are the steps to follow:

1. Open the Air India app on your smartphone or tablet and log in with your credentials.
2. Tap on "My Trips" and select the booking that you want to download.
3. You will see your booking details and e-ticket on the screen. You can also view or change your seat, meal, or baggage preferences.
4. Tap on "Download E-Ticket" and save the PDF file on your device.

You have successfully downloaded your flight ticket from the Air India app!


How to print your flight ticket from the Air India website or app?


      If you want to print a hard copy of your flight ticket, you can do so from either the Air India website or app. Here are the steps to follow:

1. Follow the steps above to download your flight ticket from the website or app.
2. Open the PDF file of your e-ticket on your device and click on "Print".
3. Select your printer settings and print the e-ticket.

You have successfully printed your flight ticket from the Air India website or app!


How to check in online with Air India?


Online check-in is a convenient way to save time and hassle at the airport. You can check in online with Air India from 48 hours to 2 hours before your flight departure. Here are the benefits of online check-in:

• You can choose your seat and print or download your boarding pass.
• You can avoid long queues at the check-in counters.
• You can drop off your baggage at the dedicated counters or use the self-service kiosks.
• You can proceed directly to the security check and boarding gate.

      To check-in online with Air India, you can use either their website or their app. Here are the steps to follow:

        -
      1. Go to www.airindia.in or open the Air India app and click on "Web Check-in".
      2. -
      3. Enter your booking reference number or e-ticket number and last name or email ID and click on "Check-in".
      4. -
      5. Select the passengers that you want to check-in and click on "Continue".
      6. -
      7. Choose your seat from the seat map and click on "Confirm".
      8. -
      9. Review your check-in details and click on "Print Boarding Pass" or "Download Boarding Pass".
      10. -
      11. You will receive your boarding pass as a PDF file or a QR code on your device.
      12. -
      -

      You have successfully checked-in online with Air India!

How to manage your booking with Air India?

If you want to change or cancel your booking with Air India, you can do so online up to 24 hours before your flight departure. Here are the steps to follow:

1. Go to www.airindia.in and click on "Manage Your Trip".
2. Enter your booking reference number and last name or email ID and click on "Retrieve Booking".
3. You will see your booking details and options to modify or cancel your booking.
4. Select the option that you want and follow the instructions.
5. You may have to pay some fees or charges depending on the fare rules and conditions of your booking.
6. You will receive a confirmation email and SMS with the updated booking details.

You have successfully managed your booking with Air India!

What are the other services offered by Air India?

Air India is not only an airline, but also a service provider that offers various benefits and facilities to its passengers. Some of the other services offered by Air India are:

• Air India Flying Returns: This is the frequent flyer program of Air India that allows you to earn and redeem miles for flights, upgrades, lounge access, and more.
• Air India Maharaja Lounge: This is the exclusive lounge for Air India's business class and first class passengers, as well as Star Alliance Gold members. It offers comfortable seating, refreshments, Wi-Fi, entertainment, and other amenities.
• Air India Express: This is the low-cost subsidiary of Air India that operates flights to the Middle East and Southeast Asia. It offers affordable fares, online booking, web check-in, and free baggage allowance.
• Air India Cargo: This is the cargo division of Air India that transports goods and parcels across India and abroad. It offers reliable, safe, and fast delivery of various types of cargo.
• Air India Holidays: This is the travel package service of Air India that offers customized and attractive deals for domestic and international destinations. It includes flights, hotels, transfers, sightseeing, and more.

      Conclusion


      Air India is one of the best airlines in India that offers quality service, comfort, safety, and convenience to its passengers. Booking, downloading, printing, and managing your flight ticket with Air India is easy and hassle-free. You can also enjoy other benefits and facilities offered by Air India, such as online check-in, lounge access, frequent flyer program, low-cost flights, cargo service, and holiday packages. So, what are you waiting for? Book your flight ticket with Air India today and experience the joy of flying!

FAQs

Here are some of the frequently asked questions and answers about Air India ticket download:

1. Q: How can I contact Air India customer care?
A: You can contact Air India customer care through their toll-free number 1800 180 1407 or their email ID call.del@airindia.in. You can also visit their website www.airindia.in for more information.

2. Q: How can I get a refund for my canceled flight ticket?
A: If you have canceled your flight ticket online with Air India, you can request a refund through their website www.airindia.in/refund.htm. You will have to provide your booking reference number or e-ticket number and bank details. The refund amount will depend on the fare rules and conditions of your booking.

3. Q: How can I check the status of my flight with Air India?
A: You can check the status of your flight with Air India through their website www.airindia.in/flight-status.htm or their app. You will have to enter your flight number and date of departure. You can also sign up for SMS or email alerts to get updates on your flight status.

4. Q: How can I upgrade my flight ticket with Air India?
A: You can upgrade your flight ticket with Air India through their website www.airindia.in/upgrade.htm or their app. You will have to enter your booking reference number or e-ticket number and last name or email ID. You can then choose the available upgrade options and pay the difference in fare. You will receive a confirmation email and SMS with the upgraded booking details.

5. Q: How can I earn and redeem miles with Air India Flying Returns?
A: You can earn and redeem miles with Air India Flying Returns by joining their frequent flyer program. You can register online through their website www.airindia.in/flying-returns.htm or their app. You can then earn miles for every flight you take with Air India or its partner airlines. You can also earn miles for using the services of their non-airline partners, such as hotels, car rentals, shopping, etc. You can redeem your miles for flights, upgrades, lounge access, and more.

      I hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy flying!

      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/BEYBLADE BURST App Battle Your Friends Online and Offline.md b/spaces/congsaPfin/Manga-OCR/logs/BEYBLADE BURST App Battle Your Friends Online and Offline.md deleted file mode 100644 index 7e1d241449a0782269e705481023de95ca170c17..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/BEYBLADE BURST App Battle Your Friends Online and Offline.md +++ /dev/null @@ -1,88 +0,0 @@ - -

      Beyblade Burst App: A Guide for Beginners


      If you are a fan of Beyblade Burst, you might have heard of the Beyblade Burst App, a mobile game that lets you create, customize, and battle with your own digital Beyblade Burst tops. But what is this app exactly, and how can you play it? In this article, we will give you a comprehensive guide on everything you need to know about the Beyblade Burst App, from how to download and install it, to how to play and win battles, to some tips and tricks that will help you become a master Blader. Let's get started!


      beyblade burst app


      Download Zip >>> https://urlca.com/2uOfFE



What is Beyblade Burst App?

A brief introduction to the app and its features

      The Beyblade Burst App is a video game developed by Hasbro that is based on the popular anime and toy series, Beyblade Burst. The app allows you to scan QR codes included with Hasbro Beyblade products, or create your own custom tops using virtual parts. You can then battle your tops against other players from over 90 countries worldwide, either online or offline, in various modes and arenas. You can also create or join a Battle League with your friends, where you can compete in multi-round tournaments for the title of top Blader. The app also features Bluetooth enabled controls that let you control your tops with your device, as well as RC Bluetooth enabled tops that you can buy separately and control with the app. The app is constantly updated with new features, modes, tops, and events, so you will never run out of things to do.

How to download and install the app on your device

      The Beyblade Burst App is available for free on both Android and iOS devices. You can download it from the Google Play Store or the App Store, depending on your device. The app requires access to your device's camera to scan QR codes, so make sure you enable it in your device's privacy settings. The app also requires an internet connection for some features, such as multiplayer online battles, friends leaderboard, and progress restore. The app works with select Android devices (Android 4.4+), Samsung Galaxy S4+, Samsung Galaxy Note 3+, iPod Touch 5, iPhone 4S+, iPhone 6+, iPad 2+, iPad Mini+, iPad Air+, iPad Pro+. You can check beyblade.hasbro.com for more details and device compatibility.

How to Play Beyblade Burst App?

How to scan and customize your Beyblade Burst tops

      One of the coolest features of the Beyblade Burst App is that you can scan QR codes included with Hasbro Beyblade products and unlock them in the app. To do this, simply tap on the Scan icon on the main menu, point your device's camera at the QR code on the product or packaging, and wait for it to be scanned. You will then see a confirmation message that shows you the name and image of the top you scanned. You can then add the top to your collection and customize it with different parts, colors, and stickers. You can also create your own custom tops from scratch by choosing from over 1000 parts available in the app. To do this, tap on the Create icon on the main menu, and then select the parts you want for your top. You can also tap on the Random icon to generate a random top. You can then name your top, save it, and use it in battles.

How to battle online or offline with other players

      Once you have your tops ready, you can start battling with other players from around the world. There are two ways to do this: online or offline. To battle online, tap on the Battle icon on the main menu, and then choose Online Battle. You can then select the mode you want to play: Ranked, Friendly, or Battle League. In Ranked mode, you can battle against random opponents and earn points and rewards based on your performance. In Friendly mode, you can battle against your friends or other players you have added as friends in the app. In Battle League mode, you can create or join a league with your friends or other players and compete in multi-round tournaments for the title of top Blader. To battle offline, tap on the Battle icon on the main menu, and then choose Offline Battle. You can then select the mode you want to play: Exhibition, Tournament, or Scan & Battle. In Exhibition mode, you can battle against AI opponents or another player using the same device. In Tournament mode, you can create or join a tournament with up to 8 players using the same device. In Scan & Battle mode, you can scan a Beyblade Burst product and battle against it using your device.

How to create and join a Battle League with your friends

      One of the most fun features of the Beyblade Burst App is that you can create or join a Battle League with your friends or other players and compete in multi-round tournaments for the title of top Blader. To do this, tap on the Battle icon on the main menu, and then choose Online Battle. Then, tap on the Battle League icon and select Create League or Join League. If you create a league, you can name it, set the rules, invite your friends or other players, and start playing. If you join a league, you can search for an existing league by name or code, or browse through the list of available leagues. Once you join a league, you can see the schedule of matches, the standings of players, and the rewards for winning. You can also chat with other players in the league and send them friend requests.

Tips and Tricks for Beyblade Burst App

How to level up your Beyblade Burst tops and unlock new parts

      As you play with your Beyblade Burst tops in the app, you will earn experience points (XP) that will help you level up your tops and unlock new parts. Each top has a maximum level of 10, and each level unlocks a new part that you can use to customize your top. You can see the level and XP of your top by tapping on it in your collection. You can also see the parts that are unlocked or locked by tapping on them in the customization screen. To level up your tops faster, you can use energy orbs that you can earn by playing battles or completing missions. Energy orbs are consumable items that give you extra XP when you use them on your tops.

How to use different launch techniques and avatar attacks

      Another cool feature of the Beyblade Burst App is that you can use different launch techniques and avatar attacks to give yourself an edge in battles. Launch techniques are special ways of launching your top that affect its speed, power, stamina, and direction. You can choose from four launch techniques: Standard Launch (normal launch), Power Launch (increased power), Speed Launch (increased speed), and Trick Launch (random direction). To use a launch technique, swipe on the screen when launching your top and align it with the launch meter at the bottom of the screen. Avatar attacks are special moves that unleash your top's avatar and deal massive damage to your opponent's top. You can activate an avatar attack by filling up your avatar meter at the top of the screen by hitting your opponent's top or dodging their attacks. To use an avatar attack, tap on the screen when your avatar meter is full and swipe in the direction of your opponent's top. You can also use different avatar attacks depending on the mode you are playing: Turbo Slingshock, HyperSphere, or Speedstorm. Each mode has its own unique avatar attacks that you can unlock by scanning QR codes or leveling up your tops.


      How to control RC Bluetooth enabled Beyblade Burst tops


      One of the most advanced features of the Beyblade Burst App is that you can control RC Bluetooth enabled Beyblade Burst tops with your device. These are special tops that you can buy separately and pair with your device via Bluetooth. You can then use your device as a remote control to steer your top in the arena, change its direction, speed, and power, and activate avatar attacks. To use this feature, you need to have an RC Bluetooth enabled top, a compatible device, and a Beyblade Burst Beystadium. You also need to download the Beyblade Burst RC app from the Google Play Store or the App Store, depending on your device. The app will guide you through the steps of pairing your top and device, and then you can start playing. You can also switch between RC mode and normal mode by tapping on the RC icon on the main menu.

Conclusion

A summary of the main points and benefits of the app

      The Beyblade Burst App is a fun and exciting game that lets you experience the thrill of Beyblade Burst on your device. You can scan, create, customize, and battle with your own digital Beyblade Burst tops, and compete with other players from around the world in various modes and arenas. You can also create or join a Battle League with your friends, and control RC Bluetooth enabled tops with your device. The app is free to download and play, and is constantly updated with new features, modes, tops, and events. The app is also compatible with most Android and iOS devices, and works with select Hasbro Beyblade products.

A call to action to download the app and start playing

      If you are ready to join the world of Beyblade Burst, download the app today and start playing. You will have a blast creating, customizing, and battling with your own digital tops, and challenging other players from across the globe. You will also learn new skills, strategies, and techniques that will help you become a master Blader. So what are you waiting for? Download the Beyblade Burst App now and let it rip!

FAQs

What are the supported devices for Beyblade Burst App?

      The Beyblade Burst App works with select Android devices (Android 4.4+), Samsung Galaxy S4+, Samsung Galaxy Note 3+, iPod Touch 5, iPhone 4S+, iPhone 6+, iPad 2+, iPad Mini+, iPad Air+, iPad Pro+. You can check beyblade.hasbro.com for more details and device compatibility.

Do I need an internet connection to play Beyblade Burst App?

      You need an internet connection for some features of the app, such as multiplayer online battles, friends leaderboard, and progress restore. However, you can still play offline battles, scan QR codes, customize tops, and control RC Bluetooth enabled tops without an internet connection.

How can I get more Beycoins and energy orbs in Beyblade Burst App?

      Beycoins are the currency of the app that you can use to buy new parts, colors, stickers, avatars, and energy orbs. Energy orbs are consumable items that give you extra XP when you use them on your tops. You can get more Beycoins and energy orbs by playing battles, completing missions, watching ads, or buying them with real money.

What are the differences between Turbo Slingshock and HyperSphere modes in Beyblade Burst App?

      Turbo Slingshock and HyperSphere are two different modes that you can play in the app. Turbo Slingshock mode is based on the Beyblade Burst Turbo season of the anime series, where you can use special rails in the arena to boost your speed and power. HyperSphere mode is based on the Beyblade Burst Rise season of the anime series, where you can use special ramps in the arena to jump into the air and perform aerial attacks.

How can I contact the developers of Beyblade Burst App?

If you have any questions, feedback, or issues with the app, you can contact the developers by tapping on the Settings icon on the main menu, and then tapping on Help & Support. You can also visit beyblade.hasbro.com for more information and support.

      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Dinosaur Hunter 3D MOD APK Explore the Jurassic World with Amazing Graphics and Sound Effects.md b/spaces/congsaPfin/Manga-OCR/logs/Dinosaur Hunter 3D MOD APK Explore the Jurassic World with Amazing Graphics and Sound Effects.md deleted file mode 100644 index 0c2d2d3885fb7f713f79ebce72abd022d30df973..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Dinosaur Hunter 3D MOD APK Explore the Jurassic World with Amazing Graphics and Sound Effects.md +++ /dev/null @@ -1,104 +0,0 @@ - -

      Dinosaur Hunter 3D Mod APK: A Thrilling Adventure Game


      Do you love dinosaurs and hunting games? If yes, then you will surely enjoy Dinosaur Hunter 3D, a realistic and exciting adventure game where you can hunt down different types of dinosaurs in various environments. But what if you want to have more fun and freedom in the game? Well, you can try Dinosaur Hunter 3D Mod APK, a modified version of the game that gives you unlimited resources, unlocked features, and more. In this article, we will tell you everything you need to know about Dinosaur Hunter 3D and its mod apk version.


      dinosaur hunter 3d mod apk


      Download File ►►► https://urlca.com/2uO5C8



What is Dinosaur Hunter 3D?

Dinosaur Hunter 3D is an adventure game developed by Timuz Games, a popular game studio that has created many other games like Train Simulator, Bike Racing, and Sniper 3D. In this game, you can experience the thrill of hunting down dinosaurs in different locations like jungle, desert, city, and island. You can choose from a variety of weapons like sniper rifles, shotguns, pistols, and bows to hunt your prey. You can also customize your hunter with different outfits and accessories. The game has realistic graphics and sound effects that make you feel like you are in the middle of the action. You can also complete various missions and achievements to earn rewards and trophies.

Features of Dinosaur Hunter 3D

Dinosaur Hunter 3D has many features that make it an enjoyable and addictive game. Here are some of them:

Realistic graphics and sound effects

      The game has stunning graphics that show the details of the dinosaurs, the weapons, and the environments. You can see the blood splatter, the bullet holes, and the dust particles as you shoot your targets. The game also has immersive sound effects that make you hear the roars of the dinosaurs, the gunshots, and the ambient noises.

Various dinosaurs and weapons to choose from

      The game has a wide range of dinosaurs to hunt, from herbivores like triceratops and stegosaurus to carnivores like tyrannosaurus rex and velociraptor. Each dinosaur has its own behavior, speed, and strength, so you need to use different strategies and tactics to hunt them down. The game also has a variety of weapons to choose from, such as sniper rifles, shotguns, pistols, and bows. Each weapon has its own accuracy, damage, range, and reload time, so you need to pick the right one for each situation.

Challenging missions and achievements

      The game has many missions that test your skills and abilities as a hunter. You need to complete different objectives like killing a certain number of dinosaurs, hunting specific types of dinosaurs, or surviving for a certain time. The game also has many achievements that reward you with coins, gems, trophies, and badges. You can use these rewards to upgrade your weapons or buy new ones.

Offline mode and multiplayer mode

      The game can be played offline without an internet connection. You can enjoy hunting dinosaurs anytime and anywhere. The game also has a multiplayer mode where you can compete with other players online. You can join or create a room with up to four players and hunt together or against each other. You can also chat with other players and make friends or enemies. The multiplayer mode adds more fun and challenge to the game.

What is Dinosaur Hunter 3D Mod APK?

      Dinosaur Hunter 3D Mod APK is a modified version of the original game that gives you some extra benefits and features that are not available in the official version. The mod apk version is created by third-party developers who modify the game files to unlock some restrictions and limitations. The mod apk version is not affiliated with or endorsed by the original game developers.

Benefits of Dinosaur Hunter 3D Mod APK

      Dinosaur Hunter 3D Mod APK has many benefits that make it more enjoyable and convenient to play. Here are some of them:

Unlimited money and gems

      The mod apk version gives you unlimited money and gems that you can use to buy or upgrade anything you want in the game. You don't have to worry about running out of resources or spending real money to get them. You can have the best weapons, outfits, and accessories without any hassle.

All dinosaurs and weapons unlocked

      The mod apk version also unlocks all the dinosaurs and weapons that are otherwise locked in the official version. You don't have to complete missions or achievements to unlock them. You can access any dinosaur or weapon you want from the start of the game. You can enjoy hunting different types of dinosaurs with different types of weapons without any limitation.

No ads and no root required

      The mod apk version also removes all the annoying ads that interrupt your gameplay. You don't have to watch ads to get rewards or bonuses. You can play the game smoothly and without any distraction. The mod apk version also does not require root access to your device. You don't have to risk your device's security or warranty to install the mod apk version. You can install it easily and safely on your device.

How to download and install Dinosaur Hunter 3D Mod APK?

      If you want to download and install Dinosaur Hunter 3D Mod APK on your Android device, you need to follow some simple steps. Here is a step-by-step guide for you:

Step-by-step guide for Android devices

Enable unknown sources in settings

      Before you can install the mod apk file, you need to enable unknown sources in your device's settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.

Download the mod apk file from a trusted source

      Next, you need to download the mod apk file from a trusted source. There are many websites that offer mod apk files, but not all of them are safe and reliable. Some of them may contain viruses or malware that can harm your device or steal your data. To avoid this, you should download the mod apk file from a reputable website that has positive reviews and feedback from users. You can use this link to download the latest version of Dinosaur Hunter 3D Mod APK.

Install the mod apk file and enjoy the game

      Finally, you need to install the mod apk file on your device. To do this, locate the downloaded file in your device's file manager and tap on it. You will see a pop-up window asking for your permission to install the app. Tap on Install and wait for the installation process to finish. Once it is done, you can launch the game from your app drawer or home screen and enjoy hunting dinosaurs with unlimited resources and features.

Conclusion

      Dinosaur Hunter 3D is a thrilling adventure game that lets you hunt down different types of dinosaurs in various environments. You can choose from a variety of weapons and customize your hunter with different outfits and accessories. You can also play offline or online with other players. If you want to have more fun and freedom in the game, you can try Dinosaur Hunter 3D Mod APK, a modified version of the game that gives you unlimited money and gems, all dinosaurs and weapons unlocked, no ads, and no root required. You can download and install Dinosaur Hunter 3D Mod APK on your Android device by following our step-by-step guide above.

Here are some FAQs about Dinosaur Hunter 3D Mod APK:

• Q: Is Dinosaur Hunter 3D Mod APK safe to use?
A: Yes, Dinosaur Hunter 3D Mod APK is safe to use as long as you download it from a trusted source like our website. We scan all our files with antivirus software before uploading them for our users.

• Q: Does Dinosaur Hunter 3D Mod APK work on all Android devices?
A: Dinosaur Hunter 3D Mod APK works on most Android devices that have Android 4.4 or higher. However, some devices may not be compatible with the mod apk version due to different hardware or software specifications. If you encounter any problem while installing or playing the mod apk version, you can try the official version of the game from the Google Play Store.

• Q: Can I play Dinosaur Hunter 3D Mod APK with my friends?
A: Yes, you can play Dinosaur Hunter 3D Mod APK with your friends online. You can join or create a room with up to four players and hunt together or against each other. You can also chat with other players and make friends or enemies. However, you need to have an internet connection to play online.

• Q: How can I update Dinosaur Hunter 3D Mod APK?
A: Dinosaur Hunter 3D Mod APK is updated regularly by the mod developers to fix bugs and add new features. You can check our website for the latest version of the mod apk file and download it from there. You can also follow our social media pages to get notified of the updates. You need to uninstall the previous version of the mod apk file and install the new one to update the game.

• Q: What are some alternatives to Dinosaur Hunter 3D Mod APK?
A: If you are looking for some alternatives to Dinosaur Hunter 3D Mod APK, you can try these games:

  • Dino Hunter: Deadly Shores: This is another hunting game where you can hunt down ferocious dinosaurs in exotic locations like shipwrecks, jungles, and beaches. You can use a variety of weapons like rocket launchers, crossbows, and machine guns to take down your prey. You can also collect trophies and upgrade your skills.
  • Jurassic Survival Island: ARK 2 Evolve: This is a survival game where you have to survive on an island full of dinosaurs and other dangers. You can craft weapons, tools, and shelters from the resources you find on the island. You can also tame and ride some dinosaurs or fight them for food and loot. You can also explore the island and discover its secrets.
  • Dinosaur Rampage: This is a casual game where you can control a dinosaur and rampage through the city. You can destroy buildings, cars, and people as you please. You can also eat other dinosaurs and humans to grow bigger and stronger. You can choose from different types of dinosaurs like T-rex, triceratops, and pterodactyl.

      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Dr.Fone - Screen Unlock and Fix SIM Issues on AndroidiOS Devices.md b/spaces/congsaPfin/Manga-OCR/logs/Download Dr.Fone - Screen Unlock and Fix SIM Issues on AndroidiOS Devices.md deleted file mode 100644 index b077e0fbd47d7a1ebcbb114136636f53653be16e..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Dr.Fone - Screen Unlock and Fix SIM Issues on AndroidiOS Devices.md +++ /dev/null @@ -1,106 +0,0 @@ -

      How to Download Dr Fone Screen Unlock and Why You Need It


      Have you ever forgotten your password, PIN, pattern, or fingerprint on your Android or iOS device? Have you ever bought a second-hand device that is locked by iCloud activation lock or Apple ID? Have you ever encountered SIM lock or MDM issues on your iPhone? If you answered yes to any of these questions, then you need a reliable and effective tool to unlock your device and regain full access to it. That's where Dr Fone Screen Unlock comes in handy.


      download dr fone screen unlock


Download File: https://urlca.com/2uOfAj




      Dr Fone Screen Unlock is a powerful software that can remove all types of lock screens on Android and iOS devices, bypass iCloud activation lock and Apple ID without password, bypass MDM and screen time without data loss, unlock iPhone carrier and remove SIM lock, and remove iTunes backup encryption. It is compatible with most Android and iOS models and versions, including the latest ones. It is easy to use, fast, safe, and requires no technical knowledge. In this article, we will show you how to download Dr Fone Screen Unlock for Windows or Mac, how to use it for Android or iOS devices, and what are its pros and cons. Let's get started!

Features of Dr Fone Screen Unlock

Remove Android Lock Screen in 5 Minutes

      This feature allows you to remove any kind of lock screen on your Android device, such as pattern, PIN, password, fingerprint, face unlock, etc. It also helps you bypass Android FRP lock without a PIN or Google account. It supports most mainstream Android brands like Samsung, Huawei, LG, Xiaomi, etc. The best part is that it does not cause any data loss for some Samsung and LG devices.

Bypass iCloud Activation Lock and Apple ID without Password

      This feature enables you to bypass the iCloud activation lock and Apple ID on your iOS device without entering the password. This is useful if you forgot your iCloud password or bought an iCloud-locked device from someone else. It works for all iOS devices from iPhone 5S to iPhone X running iOS 12.0 - iOS 14.6. After bypassing the iCloud activation lock, you can access your device normally and use a new Apple ID to activate it.

Bypass MDM and Screen Time without Data Loss

      This feature helps you bypass MDM (Mobile Device Management) and screen time restrictions on your iOS device without losing any data or jailbreaking it. This is helpful if you want to remove all MDM policies or screen time limits imposed by your employer or parents. It supports all iOS devices and iOS versions.

Unlock iPhone Carrier and Remove SIM Lock

      This feature allows you to unlock your iPhone carrier and remove the SIM lock on your device, so that you can use any SIM card from any network provider in the world. This is useful if you want to switch to a cheaper or better service provider, or if you want to use your iPhone abroad without roaming charges. It supports all iPhone models and iOS versions.

Remove iTunes Backup Encryption

      This feature enables you to remove the encryption from your iTunes backup, so that you can access and restore your data without entering the password. This is helpful if you forgot your iTunes backup password or want to decrypt someone else's backup. It supports all iOS devices and iTunes versions.

How to Download Dr Fone Screen Unlock for Windows or Mac

Step 1: Visit the official website and choose your OS

      To download Dr Fone Screen Unlock for Windows or Mac, you need to visit the official website of Dr Fone and choose your operating system. You can either click on the link below or scan the QR code with your phone to go to the download page.

Download Dr Fone Screen Unlock for Windows

Download Dr Fone Screen Unlock for Mac

QR code for Windows download

QR code for Mac download

Step 2: Click on the download button and follow the instructions

      Once you are on the download page, you will see a big download button that says "Download Now". Click on it and wait for the file to be downloaded to your computer. The file size is about 1.5 GB, so it may take some time depending on your internet speed. After the download is complete, open the file and follow the instructions to install Dr Fone Screen Unlock on your computer. The installation process is simple and straightforward, and it will take only a few minutes.

Step 3: Launch the program and select the Screen Unlock module

      After the installation is done, launch Dr Fone Screen Unlock on your computer. You will see a user-friendly interface with several modules to choose from. Select the "Screen Unlock" module from the main menu. You will then see four options: Android Lock Screen Removal, iOS Unlock, iOS System Repair, and iTunes Backup Unlocker. Choose the one that suits your needs and proceed to the next step.

How to Use Dr Fone Screen Unlock for Android or iOS Devices

Step 1: Connect your device to the computer and choose the unlock option

      The first step is to connect your Android or iOS device to the computer using a USB cable. Make sure that you have enabled USB debugging on your Android device or trusted this computer on your iOS device. Once your device is detected by Dr Fone Screen Unlock, you will see a list of unlock options for your device. For example, if you have an Android device, you will see options like Remove Screen Lock, Remove Google Lock (FRP), Remove Samsung Lock (Reactivation Lock), etc. If you have an iOS device, you will see options like Unlock Apple ID, Bypass MDM, Bypass Screen Time, etc. Choose the option that matches your situation and click on "Start".

Step 2: Select your device model and enter the download or recovery mode

      The next step is to select your device model and enter the download or recovery mode on your device. This is necessary for Dr Fone Screen Unlock to download or extract the firmware package for your device. You will see detailed instructions on how to do this on the screen. Follow them carefully and make sure that your device stays connected during this process. It may take some time depending on your device model and internet speed.

Step 3: Wait for the unlock process to complete and access your device

      The final step is to wait for Dr Fone Screen Unlock to unlock your device. This may also take some time depending on the type of lock and the complexity of the password. Do not disconnect or use your device during this process. Once the unlock process is done, you will see a confirmation message on the screen. You can then disconnect your device and access it without any lock screen or restrictions.

Pros and Cons of Dr Fone Screen Unlock

Pros: Easy to use, fast, safe, compatible, no data loss (for some devices)

      Dr Fone Screen Unlock has many advantages that make it a great choice for unlocking your Android or iOS device. Some of them are:

• It is easy to use: You do not need any technical knowledge or skills to use Dr Fone Screen Unlock. You just need to follow the simple steps and instructions on the screen.
• It is fast: Dr Fone Screen Unlock can unlock your device in a matter of minutes, depending on the type of lock and the complexity of the password. You do not have to wait for hours or days to access your device.
• It is safe: Dr Fone Screen Unlock does not harm your device or data in any way. It does not contain any viruses, malware, or spyware. It does not leak your personal information or data to any third parties. It respects your privacy and security.
• It is compatible: Dr Fone Screen Unlock supports most Android and iOS models and versions, including the latest ones. It can unlock all types of lock screens and restrictions on your device. It can also work with different operating systems, such as Windows and Mac.
• It does not cause data loss (for some devices): Dr Fone Screen Unlock does not erase or overwrite your data when unlocking your device, as long as you choose the right option and follow the instructions. This is especially true for some Samsung and LG devices that can be unlocked without data loss.

      Cons: Not free, requires internet connection, may not work for some devices or situations


      Dr Fone Screen Unlock also has some disadvantages that you should be aware of before using it. Some of them are:

• It is not free: Dr Fone Screen Unlock is a paid software that requires you to purchase a license to use it. The price varies depending on the type of license and the duration of use. You can check the official website for more details on the pricing and payment options.
• It requires internet connection: Dr Fone Screen Unlock needs an internet connection to download or extract the firmware package for your device. This means that you cannot use it offline or in areas with poor network coverage. You also need to make sure that your internet speed is stable and fast enough to avoid interruptions or errors.
• It may not work for some devices or situations: Dr Fone Screen Unlock may not be able to unlock your device if it has a hardware problem, a severe software issue, a custom ROM, a rooted or jailbroken system, or a complex password. It may also fail to unlock your device if you do not follow the instructions correctly or if you disconnect or use your device during the unlock process. You should always backup your data before using Dr Fone Screen Unlock, just in case something goes wrong.

      Conclusion and FAQs


      In conclusion, Dr Fone Screen Unlock is a powerful and reliable software that can help you unlock your Android or iOS device in various situations. It has many features that make it easy to use, fast, safe, compatible, and data-preserving (for some devices). However, it also has some drawbacks that you should consider before using it, such as its price, internet requirement, and possible limitations. If you are looking for a tool to unlock your device without any hassle or risk, you should give Dr Fone Screen Unlock a try. You can download it from the official website and follow the steps we have shown you in this article.

Here are some FAQs that you may have about Dr Fone Screen Unlock:

      Q: Is Dr Fone Screen Unlock legal?


      A: Yes, Dr Fone Screen Unlock is legal as long as you use it for legitimate purposes, such as unlocking your own device or a device that you have permission to unlock. You should not use it for illegal or unethical purposes, such as unlocking someone else's device without their consent or stealing someone's data.

Q: Is Dr Fone Screen Unlock safe?

      A: Yes, Dr Fone Screen Unlock is safe as long as you download it from the official website and use it according to the instructions. It does not contain any viruses, malware, or spyware. It does not harm your device or data in any way. It does not leak your personal information or data to any third parties. It respects your privacy and security.

Q: Does Dr Fone Screen Unlock work for all devices?

      A: No, Dr Fone Screen Unlock does not work for all devices. It supports most Android and iOS models and versions, but it may not work for some devices that have a hardware problem, a severe software issue, a custom ROM, a rooted or jailbroken system, or a complex password. You can check the compatibility list on the official website before using Dr Fone Screen Unlock.

Q: Does Dr Fone Screen Unlock cause data loss?

      A: No, Dr Fone Screen Unlock does not cause data loss for some devices, as long as you choose the right option and follow the instructions. For example, some Samsung and LG devices can be unlocked without data loss. However, for some other devices or situations, data loss may occur. You should always backup your data before using Dr Fone Screen Unlock, just in case something goes wrong.

Q: How much does Dr Fone Screen Unlock cost?

      A: Dr Fone Screen Unlock is not a free software. It requires you to purchase a license to use it. The price varies depending on the type of license and the duration of use. You can check the official website for more details on the pricing and payment options. You can also enjoy a free trial version before buying the full version.

      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download Gangstar New York for PC and enjoy the action-packed PVPVE gameplay.md b/spaces/congsaPfin/Manga-OCR/logs/Download Gangstar New York for PC and enjoy the action-packed PVPVE gameplay.md deleted file mode 100644 index 975a9df7b62bf6ddef6098f9de5f09ffa4ba0a54..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download Gangstar New York for PC and enjoy the action-packed PVPVE gameplay.md +++ /dev/null @@ -1,102 +0,0 @@ - -

      Gangstar New York: How to Download and Play the Open Alpha on PC


      Are you looking for a new and exciting game to play on your PC? Do you want to experience the thrill of being a gangster in a near-future New York City? If so, you should check out Gangstar New York, a free-to-play third-person shooter game that lets you compete with other players in a citywide crime competition. In this article, we will tell you what Gangstar New York is, why you should play it, and how to download and play it on your PC. We will also give you some tips and tricks for playing the game effectively. So, let's get started!

What is Gangstar New York?

      Gangstar New York is a multiplayer online game developed by Gameloft that is currently in open alpha stage. The game is set in a near-future New York City that is under the surveillance of Aurora Inc., a massive tech conglomerate that uses an algorithmic network to stop crimes before they start. However, Aurora Inc. also turns off the cameras for 20-minute intervals when they need plausible deniability to test "experimental technological initiatives" on the citizens.


      gangstar new york download for pc


      Download ->->->-> https://urlca.com/2uO5pj




      During these intervals, 20 aspiring gangsters compete in a citywide crime competition to score as much cash as possible. The winner of each round is crowned Gangstar, and is showered with fame and accolades. The game features an action-packed PVPVE open world where you can explore, parkour, zipline, jetpack, steal, have aerial battles and race around New York. You can also customize your character with different skins inspired by various gangster typologies.

Why should you play Gangstar New York?

      Gangstar New York is a game that offers a lot of fun and excitement for anyone who loves shooting games, open world games, or gangster games. Here are some of the reasons why you should play Gangstar New York:

• The game is free-to-play and does not require any purchase or subscription to enjoy. You can download and play the game as much as you want without spending a dime.
• The game is fair and balanced and does not give any advantage to players who pay or play more. You can compete with other players on a level playing field, regardless of your level, gear, or skills.
• The game is dynamic and unpredictable and does not follow any fixed script or pattern. You can experience different scenarios and outcomes every time you play, depending on the experiments that Aurora Inc. unleashes on the city.
• The game is fun and immersive and does not bore or frustrate you with tedious tasks or repetitive missions. You can explore the city at your own pace, engage in various activities, and interact with other players in a lively and realistic environment.

      So, if you are looking for a game that will keep you entertained and challenged for hours, Gangstar New York is the game for you!

How to download and play Gangstar New York on PC?

      Now that you know what Gangstar New York is and why you should play it, you might be wondering how to download and play it on your PC. Well, don't worry, because we have got you covered. Here are the steps to download and play Gangstar New York on PC:

Step 1: Go to the Steam store page of Gangstar New York

      The first thing you need to do is to go to the Steam store page of Gangstar New York. You can do this by clicking on this link: [Gangstar New York on Steam]. Alternatively, you can open your Steam client, go to the Store tab, and search for "Gangstar New York" in the search bar.


      Once you are on the store page, you will see some information about the game, such as its description, screenshots, videos, reviews, and system requirements. You will also see a green button that says "Play Game". Before you click on that button, we recommend that you add the game to your wishlist by clicking on the "+WISHLIST" button below it. This way, you will be notified of any updates or news about the game in the future.

Step 2: Click on the "Play Game" button

      After you have added the game to your wishlist, you can click on the "Play Game" button to start downloading and installing the game on your PC. You will see a pop-up window that asks you to confirm your choice. Click on the "Yes" button to proceed.


      The game will then be added to your Steam library under the "Games" tab. You will see a progress bar that shows you how much of the game has been downloaded and installed. The game size is about 4 GB, so depending on your internet speed and PC performance, it might take some time to complete. You can check the estimated time remaining by hovering over the progress bar.

Step 3: Launch the game and create your account

      Once the game has been downloaded and installed, you can launch it from your Steam library by double-clicking on its name or right-clicking on it and selecting "Play". You will see a splash screen that shows the game's logo and some loading messages.

      -

      After a few seconds, you will be taken to the main menu of the game. Here, you will see some options such as "Play", "Settings", "Credits", and "Quit". Before you click on "Play", you need to create your Gangstar account by clicking on the "Create Account" button at the bottom right corner of the screen.

      -

      You will then be asked to enter your email address, password, username, and country. Make sure that you enter valid and secure information that you can remember later. You will also need to agree to the terms of service and privacy policy of the game by checking the boxes below. After that, click on the "Create Account" button again to confirm your registration.

      -

      Step 4: Customize your character and join a match

      -

      Congratulations! You have successfully created your Gangstar account and are ready to play the game. The next thing you need to do is to customize your character by choosing a skin from various options. You can do this by clicking on the "Customize" button at the top right corner of the screen.

      -

      You will then see a preview of your character and a list of skins that you can select from. Some skins are unlocked by default, while others require cash or gems to unlock. Cash and gems are the in-game currencies that you can earn by playing the game or buying them with real money. You can also change your skin during the game by finding and using a skin changer device.

      -

      After you have chosen your skin, you can join a match by clicking on the "Play" button at the top left corner of the screen. You will then see a list of available matches that you can join, or you can create your own match by clicking on the "Create Match" button. Each match can have up to 20 players and lasts for 20 minutes. You can also choose the map, mode, and difficulty of the match.

      -

      Once you have joined or created a match, you will be taken to the lobby where you can see the other players and chat with them. You can also invite your friends to join your match by clicking on the "Invite Friends" button at the bottom left corner of the screen. When the match is ready to start, you will see a countdown timer and a "Ready" button. Click on the "Ready" button to confirm that you are ready to play.

      -

      Tips and tricks for playing Gangstar New York on PC

      -

      Now that you know how to download and play Gangstar New York on PC, you might be wondering how to play it well and win the game. Well, don't worry, because we have some tips and tricks for you that will help you improve your skills and performance in the game. Here are some of them:

      -

      Tip 1: Use the map and the mini-map to navigate the city

      -

      One of the most important things to do in Gangstar New York is to use the map and the mini-map to navigate the city and find your way around. The map is a large-scale view of the entire city that shows you all the locations, objectives, and enemies in the game. You can access the map by pressing the "M" key on your keyboard or clicking on the map icon at the top right corner of the screen.

      -

      The mini-map is a small-scale view of your immediate surroundings that shows you your position, direction, and nearby points of interest. You can see the mini-map at the bottom right corner of the screen. You can also zoom in and out of the mini-map by using the mouse wheel or clicking on the plus and minus buttons.

      -

      Using the map and the mini-map will help you plan your route, avoid danger, locate targets, and complete objectives. You can also mark locations on the map by right-clicking on them or pressing the "F" key on your keyboard. This will create a waypoint that will guide you to the marked location.

      -

      Tip 2: Collect cash and weapons from various sources

      -

      Another important thing to do in Gangstar New York is to collect cash and weapons from various sources. Cash is the main currency in the game that you can use to buy weapons, skins, and other items. You can also use cash to score points and win the game. Weapons are the tools that you can use to fight, defend, and survive in the game. You can also use weapons to score points and win the game.

      -

      You can collect cash and weapons from various sources, such as:

      -
• Looting buildings: You can enter and loot any building in the city by breaking the door or window. You can find cash, weapons, ammo, health kits, and other items inside the buildings. However, be careful, as some buildings may have alarms, traps, or enemies inside.
• Stealing vehicles: You can steal any vehicle in the city by approaching it and pressing the "E" key on your keyboard or clicking on the vehicle icon. You can then drive or fly the vehicle around the city. You can also find cash, weapons, ammo, health kits, and other items inside the vehicles. However, be careful, as some vehicles may have locks, alarms, or enemies inside.
• Killing players: You can kill any player in the city by shooting, stabbing, punching, or running them over. You can then loot their corpse and take their cash, weapons, ammo, health kits, and other items. However, be careful, as some players may have friends, allies, or enemies nearby.

      Collecting cash and weapons from various sources will help you increase your arsenal, improve your equipment, and boost your score.

      -

      Tip 3: Adapt to the experiments and use them to your advantage

      -

      One of the most unique and exciting features of Gangstar New York is the experiments that Aurora Inc. conducts on the city during each match. These experiments are random events that affect the game parameters, such as gravity, weather, physics, lighting, sound, and more. Some examples of experiments are lunar gravity, electric floor, heavy fog, silent night, zombie apocalypse, and more.

      -

These experiments can have positive or negative effects on your gameplay, depending on how you adapt to them and use them to your advantage. For example, lunar gravity can make you jump higher and farther, while experiments such as heavy fog or silent night can limit what you can see and hear, so adjust your tactics to whatever Aurora Inc. throws at the city.

Another tip is to decide when to work with other players and when to work against them. Two of the riskier options are:

• Betraying: You can team up with other players and then turn on them when it benefits you the most. Betraying other players can help you gain cash faster, obtain items easier, and eliminate enemies better.
• Stealing: You can steal from other players by taking their cash or items without their consent or knowledge. You can also steal from other players by hacking their vehicles or devices. Stealing from other players can help you increase your score faster, improve your equipment easier, and weaken your enemies better.

Working with or against other players will help you create your own style of gameplay, make your own decisions, and face your own consequences.

      -

      Conclusion

      -

      Gangstar New York is a free-to-play third-person shooter game that lets you compete with other players in a citywide crime competition in a near-future New York City. The game features an action-packed PVPVE open world, a level playing field, and dynamic experiments. You can download and play the game on your PC by following the steps we have provided in this article. You can also improve your skills and performance in the game by following the tips and tricks we have shared in this article. So, what are you waiting for? Download Gangstar New York today and become the next Gangstar!

      -

      FAQs

      -

      Here are some frequently asked questions and answers about Gangstar New York:

      -
1. Is Gangstar New York available on other platforms?

   Yes, Gangstar New York is also available on Android and iOS devices. You can download the game from the Google Play Store or the App Store respectively.

2. Is Gangstar New York a single-player or a multiplayer game?

   Gangstar New York is primarily a multiplayer game that requires an internet connection to play. However, you can also play the game in single-player mode by creating a private match and playing alone or with bots.

3. How long is the open alpha stage of Gangstar New York?

   The open alpha stage of Gangstar New York is expected to last for several months, during which the developers will collect feedback, fix bugs, and add new features to the game. The exact date of the end of the open alpha stage is not yet announced.

4. How can I report a bug or a problem in Gangstar New York?

   You can report a bug or a problem in Gangstar New York by using the in-game feedback system or by contacting the customer support team via email or social media. You can find more information about how to report a bug or a problem on the official website of the game.

5. How can I get more cash or gems in Gangstar New York?

   You can get more cash or gems in Gangstar New York by playing the game regularly, completing objectives, killing enemies, looting buildings, stealing vehicles, and winning matches. You can also get more cash or gems by buying them with real money through the in-game store.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Get Ready for the Most Exciting 3D Shooter Game Ever with Special Forces Group 2 1.6 MOD APK - Unlimited Money and Everything Else You Need.md b/spaces/congsaPfin/Manga-OCR/logs/Get Ready for the Most Exciting 3D Shooter Game Ever with Special Forces Group 2 1.6 MOD APK - Unlimited Money and Everything Else You Need.md deleted file mode 100644 index fbacf8478ad574e37843bf56b33567510a83d716..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Get Ready for the Most Exciting 3D Shooter Game Ever with Special Forces Group 2 1.6 MOD APK - Unlimited Money and Everything Else You Need.md +++ /dev/null @@ -1,112 +0,0 @@ -
      -

      Special Forces Group 2 1.6 Mod Apk: A Thrilling FPS Game for Android

      -

      If you are a fan of first-person shooter (FPS) games, you might have heard of Special Forces Group 2, a popular 3D FPS game for Android devices. But did you know that there is a modded version of this game that gives you unlimited money and weapons? In this article, we will tell you everything you need to know about Special Forces Group 2 1.6 Mod Apk, including how to download and install it, what are its features, and what are its pros and cons.

      -

      What is Special Forces Group 2?

      -

      A 3D first-person shooter game with various modes and features

      -

      Special Forces Group 2 is a 3D FPS game that lets you experience real-time shooting action on your Android device. You can play this game in single-player mode with bots or in multiplayer mode online or via Wi-Fi router. You can choose from nine different game modes, such as Classic, Resurrection, Capture the Flag, Zombie Mode, Bomb Mode, Knives, Deathmatch, Arms Race, and Sniper. You can also customize your weapons, skins, characters, and maps according to your preference.

      -

      special forces group 2 1.6 mod apk


      Download Zip ::: https://urlca.com/2uO7d9



      -

      A modded version of the original game with unlimited money and weapons

      -

      Special Forces Group 2 1.6 Mod Apk is a modified version of the original game that gives you access to unlimited money and weapons. This means that you can buy any weapon or skin you want without worrying about your budget. You can also unlock all the maps and characters in the game without completing any missions or achievements. With this modded version, you can enjoy the game without any limitations or restrictions.

      -

      How to download and install Special Forces Group 2 1.6 Mod Apk?

      -

      The steps to download and install the mod apk file

      -

To download and install Special Forces Group 2 1.6 Mod Apk on your Android device, follow these steps (a PC-based alternative using adb is sketched after the list):

1. Go to the link Special Forces Group 2 1.6 Mod Apk Download and click on the download button.
2. Wait for the mod apk file to be downloaded on your device.
3. Go to your device settings and enable the option to install apps from unknown sources.
4. Locate the mod apk file in your file manager and tap on it to start the installation process.
5. Follow the instructions on the screen and wait for the installation to be completed.
6. Launch the game and enjoy the modded features.
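If you would rather install from a PC over USB instead of tapping through the steps on the phone, the same installation can be done with the adb tool from Google's Android platform tools. This is only a convenience sketch, not part of the official instructions; it assumes USB debugging is enabled on the phone, adb is on your PATH, and the filename below is a placeholder for whatever you actually downloaded.

```python
import subprocess
from pathlib import Path

APK = Path("sfg2-mod.apk")  # placeholder filename; point this at the file you downloaded

def adb(*args: str) -> str:
    """Run an adb command and return its output, raising if adb reports a failure."""
    result = subprocess.run(["adb", *args], capture_output=True, text=True, check=True)
    return result.stdout.strip()

print(adb("devices"))                  # the phone should be listed as "device", not "unauthorized"
print(adb("install", "-r", str(APK)))  # -r replaces an existing install while keeping its data
```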

      The precautions to take before installing the mod apk file

      -

      Before you install Special Forces Group 2 1.6 Mod Apk on your device, you should take some precautions to avoid any problems or risks. Here are some tips to follow:

      -


      -
• Make sure that you have enough storage space on your device to install the mod apk file.
• Make sure that your device meets the minimum requirements to run the game smoothly.
• Make sure that you have a stable internet connection to download and play the game online.
• Make sure that you have a backup of your original game data in case you want to restore it later (a sketch for saving a copy of the original APK from a PC follows this list).
• Make sure that you download the mod apk file from a trusted and reliable source.
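For the backup step above, one option (if you have a PC, a USB cable, and USB debugging enabled) is to pull a copy of the currently installed APK before replacing it, so you can reinstall the original later. The package name below is a placeholder — check the real one on your device, for example in the game's Play Store URL — and note that this copies only the APK itself, not protected save data.

```python
import subprocess

PACKAGE = "com.example.specialforcesgroup2"  # placeholder; substitute the game's real package name

def adb(*args: str) -> str:
    return subprocess.run(["adb", *args], capture_output=True, text=True, check=True).stdout.strip()

# Ask the package manager where the installed APK lives, then copy it to the PC.
apk_path = adb("shell", "pm", "path", PACKAGE).splitlines()[0].removeprefix("package:")
adb("pull", apk_path, "original-game.apk")
print("Saved a copy of the installed APK as original-game.apk")
```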

      What are the features of Special Forces Group 2 1.6 Mod Apk?

      -

      The gameplay and graphics of the modded game

      -

      Special Forces Group 2 1.6 Mod Apk offers a realistic and immersive gameplay experience for FPS fans. You can control your character with easy and intuitive touch controls, aim and shoot with precision, and switch between different weapons with a simple tap. You can also use grenades, flashbangs, smoke bombs, and other items to gain an advantage over your enemies. The game also features high-quality 3D graphics, realistic sound effects, and smooth animations that make you feel like you are in a real battlefield.

      -

      The weapons and skins of the modded game

      -

      One of the best features of Special Forces Group 2 1.6 Mod Apk is that it gives you access to unlimited money and weapons. You can buy any weapon or skin you want from the shop without spending a dime. You can choose from over 150 different weapons, such as pistols, rifles, shotguns, snipers, machine guns, rocket launchers, and more. You can also customize your weapons with various attachments, such as scopes, silencers, lasers, and more. You can also change your skin from over 80 different options, such as soldiers, terrorists, zombies, robots, and more.

      -

      The maps and characters of the modded game

      -

      Another great feature of Special Forces Group 2 1.6 Mod Apk is that it gives you access to all the maps and characters in the game without unlocking them. You can play on any of the 30 maps available in the game, such as Desert, City, Snow, Forest, Hospital, Ship, Space Station, and more. You can also play as any of the 12 characters available in the game, such as US Army, Russian Spetsnaz, British SAS, French GIGN, German GSG9, Chinese PLA, and more. You can also create your own custom maps and characters using the built-in editor.

      -

      What are the pros and cons of Special Forces Group 2 1.6 Mod Apk?

      -

      The advantages of playing the modded game

      -

      Playing Special Forces Group 2 1.6 Mod Apk has many advantages over playing the original game. Some of them are:

      -
• You can enjoy unlimited money and weapons without spending real money.
• You can unlock all the maps and characters without completing any missions or achievements.
• You can customize your weapons and skins according to your preference.
• You can create your own maps and characters using the editor.
• You can have more fun and excitement playing with modded features.

      The disadvantages of playing the modded game

      -

      Playing Special Forces Group 2 1.6 Mod Apk also has some disadvantages over playing the original game. Some of them are:

      -
• You may face some compatibility issues with some devices or Android versions.
• You may encounter some bugs or glitches in the modded game.
• You may lose your original game data if you don't back it up before installing the mod apk file.
• You may get banned from online servers if you are detected using the modded game.
• You may miss out on the updates and new features of the original game.

      Conclusion

      -

      A summary of the main points of the article

      -

      In conclusion, Special Forces Group 2 1.6 Mod Apk is a thrilling FPS game for Android that gives you unlimited money and weapons, as well as access to all the maps and characters in the game. You can download and install this modded version of the game by following the steps and precautions mentioned in this article. You can also enjoy the realistic and immersive gameplay, graphics, sound effects, and animations of this game. However, you should also be aware of the disadvantages of playing the modded game, such as compatibility issues, bugs, glitches, data loss, ban risk, and update lag.

      -

      A call to action for the readers to try the modded game

      -

      If you are looking for a fun and exciting FPS game for your Android device, you should definitely give Special Forces Group 2 1.6 Mod Apk a try. You will not regret it. You can download the mod apk file from the link below and start playing right away. You can also share your feedback and opinions about this game in the comments section below. We would love to hear from you.

      -

      FAQs

      -

      Q1: Is Special Forces Group 2 1.6 Mod Apk safe to use?

      -

      A1: Yes, Special Forces Group 2 1.6 Mod Apk is safe to use as long as you download it from a trusted and reliable source. However, you should always scan the mod apk file with an antivirus or malware scanner before installing it on your device.

      -

      Q2: Is Special Forces Group 2 1.6 Mod Apk compatible with all Android devices?

      -

      A2: No, Special Forces Group 2 1.6 Mod Apk may not be compatible with some Android devices or versions. You should check the minimum requirements of the game before downloading and installing it on your device.

      -

      Q3: Can I play Special Forces Group 2 1.6 Mod Apk online with other players?

      -

      A3: Yes, you can play Special Forces Group 2 1.6 Mod Apk online with other players via Wi-Fi router or internet connection. However, you may face some difficulties or errors while playing online with the modded game. You may also get banned from online servers if you are detected using the modded game.

      -

      Q4: How can I update Special Forces Group 2 1.6 Mod Apk to the latest version?

      -

      A4: You can update Special Forces Group 2 1.6 Mod Apk to the latest version by downloading and installing the new mod apk file from the same source where you downloaded the previous one. You should also delete the old mod apk file from your device before installing the new one.

      -

      Q5: Where can I find more modded games like Special Forces Group 2 1.6 Mod Apk?

      -

      A5: You can find more modded games like Special Forces Group 2 1.6 Mod Apk on various websites and blogs that offer modded games for Android devices. You can also search for them on Google or other search engines.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Install New State Mobile on Your Android Phone APK and OBB Download Guide.md b/spaces/congsaPfin/Manga-OCR/logs/Install New State Mobile on Your Android Phone APK and OBB Download Guide.md deleted file mode 100644 index 0e7ce98e37f90060e522ef743598d6e8dda74a3f..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Install New State Mobile on Your Android Phone APK and OBB Download Guide.md +++ /dev/null @@ -1,151 +0,0 @@ -
      - - -
      -

      How to Download New State Mobile APK and OBB Files

      -

      New State Mobile is a new battle royale game developed by Krafton, the makers of PUBG. It is set in a futuristic world where players can use drones, combat rolls, ballistic shields, and other advanced technologies to survive. If you are a fan of PUBG or battle royale games in general, you might want to try out New State Mobile as soon as possible.

      -

      new state mobile download apk obb


      Download File >>> https://urlca.com/2uOb1Z



      -

      But how can you download New State Mobile APK and OBB files on your Android device? In this article, we will show you two ways to do it: from Google Play Store or from third-party sources. We will also explain the benefits and risks of downloading New State Mobile APK and OBB files, as well as some tips to make sure you have a smooth gaming experience.

      -

      What is New State Mobile?

      -

      New State Mobile is a new mobile game that is based on PUBG but with a twist. It is set in 2051, where anarchy and chaos have taken over the world. Players have to fight against each other in an 8x8 km map with various terrains and landmarks. The game features realistic graphics, dynamic gunplay, diverse vehicles, and customizable weapons.

      -

      New State Mobile also introduces some new elements to the battle royale genre, such as drones, combat rolls, ballistic shields, deployable bunkers, fire support systems, and more. These features allow players to create their own strategies and tactics to survive. The game also has a global illumination technology that enhances the lighting and shadows in the game.

      -

      Why Download New State Mobile APK and OBB Files?

      -

      New State Mobile APK and OBB files are the files that contain the game data and resources. APK stands for Android Package Kit, which is the file format used to distribute and install applications on Android devices. OBB stands for Opaque Binary Blob, which is a file format used to store large amounts of data that are not required to run the application.

      -

      Downloading New State Mobile APK and OBB files can have some benefits and risks, depending on where you download them from. Let's take a look at some of them.

      -

      -

      Benefits of Downloading New State Mobile APK and OBB Files

      -
• You can play the game before it is officially released in your region. New State Mobile is currently in the pre-registration stage, which means that it is not available for download in all countries yet. However, if you download the APK and OBB files from a third-party source, you can bypass the regional restrictions and play the game early.
• You can access the latest updates and features of the game. Sometimes, the developers may release new updates and features for the game that are not yet available on Google Play Store. If you download the APK and OBB files from a third-party source, you can get access to these updates and features before they are officially released.
• You can save storage space on your device. The size of New State Mobile APK and OBB files may vary depending on the source, but they are usually smaller than the size of the game on Google Play Store. This means that you can save some storage space on your device by downloading the APK and OBB files instead of installing the game from Google Play Store.

      Risks of Downloading New State Mobile APK and OBB Files

      -
• You may expose your device to malware and viruses. Downloading APK and OBB files from unknown or untrusted sources can be risky, as they may contain malicious code that can harm your device or steal your personal information. You should always be careful when downloading files from third-party sources and scan them with a reliable antivirus software before installing them.
• You may violate the terms of service of the game. Downloading APK and OBB files from third-party sources may be considered as a breach of the terms of service of New State Mobile, which can result in your account being banned or suspended. You should always respect the rules and regulations of the game and avoid any actions that may compromise your account security or integrity.
• You may encounter errors or bugs in the game. Downloading APK and OBB files from third-party sources may not guarantee that they are compatible with your device or the latest version of the game. You may experience errors or bugs in the game that can affect your gameplay or performance. You should always check the compatibility and quality of the files before downloading them.

      How to Download New State Mobile APK and OBB Files from Google Play Store?

      -

      If you want to download New State Mobile APK and OBB files from Google Play Store, you will have to wait for the official release date of the game. The game is currently in the pre-registration stage, which means that you can sign up for it and get notified when it is available for download. Here are the steps to download New State Mobile APK and OBB files from Google Play Store:

      -

      Step 1: Check Your Device Compatibility

      -

      New State Mobile is a high-end game that requires a powerful device to run smoothly. According to the official website of the game, the minimum requirements for New State Mobile are as follows:

| OS | RAM | CPU | GPU |
| --- | --- | --- | --- |
| Android 6.0 or higher | 2 GB or higher | Snapdragon 660 or higher | Adreno 512 or higher |
      -

      You can check your device specifications by going to Settings > About Phone on your device. If your device meets the minimum requirements, you can proceed to the next step. If not, you may have to upgrade your device or look for other options to play the game.
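If the phone is connected to a PC with USB debugging enabled, the same information can also be read with adb instead of digging through the Settings app. This is an optional convenience sketch, not something the game requires; the property names are standard Android system properties.

```python
import subprocess

def adb_shell(*args: str) -> str:
    out = subprocess.run(["adb", "shell", *args], capture_output=True, text=True, check=True)
    return out.stdout.strip()

print("Android version:", adb_shell("getprop", "ro.build.version.release"))
print("Chipset:", adb_shell("getprop", "ro.board.platform"))

# /proc/meminfo starts with a "MemTotal: <kB> kB" line; convert it to gigabytes.
total_kb = int(adb_shell("cat", "/proc/meminfo").splitlines()[0].split()[1])
print(f"RAM: {total_kb / 1024 / 1024:.1f} GB")
```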

      -

      Step 2: Pre-Register for New State Mobile on Google Play Store

      -

      To pre-register for New State Mobile on Google Play Store, you will need a Google account and an internet connection. You can follow these steps to pre-register for New State Mobile on Google Play Store:

      -
1. Open Google Play Store on your device and search for New State Mobile.
2. Tap on the game icon and then tap on the Pre-register button.
3. You will see a confirmation message that says "You're in! You'll be notified when this game is released". Tap on OK.
4. You can also tap on the Learn more button to see more information about the game, such as screenshots, videos, description, and ratings.
5. You can also tap on the Share button to share the game with your friends and invite them to pre-register as well.

      Step 3: Wait for the Official Release Date

      -

      After you pre-register for New State Mobile on Google Play Store, you will have to wait for the official release date of the game. The developers have not announced the exact release date yet, but they have said that it will be in 2023. You can check the official website of the game or follow their social media accounts to get the latest news and updates about the game.

      -

      When the game is released, you will get a notification on your device that says "New State Mobile is ready to play". You can tap on the notification to open Google Play Store and download the game. The game will automatically download and install the APK and OBB files on your device. You can then launch the game and enjoy it.

      How to Download New State Mobile APK and OBB Files from Third-Party Sources?

      -

      If you want to download New State Mobile APK and OBB files from third-party sources, you will have to find a reliable and trustworthy source that offers the latest and compatible files for your device. You will also have to enable unknown sources on your device to allow the installation of files from outside Google Play Store. Here are the steps to download New State Mobile APK and OBB files from third-party sources:

      -

      Step 1: Enable Unknown Sources on Your Device

      -

      To enable unknown sources on your device, you will need to go to Settings > Security > Unknown Sources on your device. You will see a toggle switch that says "Allow installation of apps from unknown sources". You will have to turn it on and confirm your choice. This will allow you to install APK and OBB files from third-party sources on your device.

      -

      However, you should be careful when enabling unknown sources, as it can expose your device to malware and viruses. You should only download files from trusted and verified sources and scan them with a reliable antivirus software before installing them. You should also disable unknown sources after installing the files to prevent any unwanted installations in the future.
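Besides an antivirus scan, it can help to compare the file's SHA-256 hash with a checksum published by the download site, when the site provides one (not all do), to confirm the file was not corrupted or swapped in transit. A minimal sketch, with a placeholder filename:

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, reading it in 1 MiB chunks."""
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

print(sha256_of(Path("newstate.apk")))  # placeholder name for the downloaded APK
# Compare the printed value against the checksum listed on the download page, if any.
```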

      -

      Step 2: Find a Reliable Third-Party Source for New State Mobile APK and OBB Files

      -

      To find a reliable third-party source for New State Mobile APK and OBB files, you will have to do some research and check the reviews and ratings of the source. You should also check the compatibility and quality of the files before downloading them. You should avoid any sources that ask for your personal information, such as your email, phone number, or credit card details.

      -

      Some Examples of Reliable Third-Party Sources for New State Mobile APK and OBB Files

      -

      Here are some examples of reliable third-party sources for New State Mobile APK and OBB files that you can try:

      -
• APKPure: This is a popular website that offers free and safe APK and OBB files for various Android games and apps. You can download New State Mobile APK and OBB files from this website by searching for the game name and clicking on the download button.
• APKMirror: This is another popular website that offers free and safe APK and OBB files for various Android games and apps. You can download New State Mobile APK and OBB files from this website by searching for the game name and clicking on the download button.
• APKCombo: This is a website that offers free and safe APK and OBB files for various Android games and apps. You can download New State Mobile APK and OBB files from this website by searching for the game name and clicking on the download button.

      Step 3: Download and Install New State Mobile APK and OBB Files on Your Device

      -

      After you find a reliable third-party source for New State Mobile APK and OBB files, you can download and install them on your device. Here are the steps to download and install New State Mobile APK and OBB files on your device:

      -
1. Open the website of the third-party source on your device and search for New State Mobile.
2. Tap on the download button and choose the version of the game that you want to download. You will see two files: an APK file and an OBB file. You will have to download both files.
3. After the download is complete, go to the download folder on your device and tap on the APK file to install it. You may see a warning message that says "This type of file can harm your device". Tap on OK to proceed.
4. You will see a screen that says "Do you want to install this application?". Tap on Install and wait for the installation to finish.
5. Do not launch the game yet. You will have to extract the OBB file first.

      How to Extract New State Mobile OBB File?

      -

To extract the New State Mobile OBB file, you will need a file manager app that can handle ZIP files, such as ES File Explorer, ZArchiver, or RAR. You can download any of these apps from Google Play Store. Here are the steps to extract the New State Mobile OBB file (a PC-based alternative is sketched after the list):

1. Open the file manager app on your device and go to the download folder where you saved the OBB file.
2. Tap and hold on the OBB file and choose Extract from the menu. You will see a folder named com.krafton.newstate.
3. Copy or move this folder to Android > obb on your device storage. If you don't have an obb folder, you can create one.
4. Make sure that the path of the OBB file is Android > obb > com.krafton.newstate > main.1.com.krafton.newstate.obb.
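If you prefer to do this from a PC — for example with the phone connected over USB and its storage mounted — the same extract-and-copy steps can be sketched in Python. This assumes the download is a ZIP archive containing the com.krafton.newstate folder, as the file-manager steps above imply; the archive name and the mount path are placeholders.

```python
import shutil
import zipfile
from pathlib import Path

DOWNLOAD = Path("new-state-obb.zip")          # placeholder name for the downloaded archive
DEVICE_ROOT = Path("/path/to/phone/storage")  # placeholder for where the phone is mounted

# Unpack the archive locally; it should contain a com.krafton.newstate folder.
work_dir = Path("extracted_obb")
with zipfile.ZipFile(DOWNLOAD) as archive:
    archive.extractall(work_dir)

# Copy that folder into Android/obb on the device storage.
src = work_dir / "com.krafton.newstate"
dst = DEVICE_ROOT / "Android" / "obb" / "com.krafton.newstate"
dst.parent.mkdir(parents=True, exist_ok=True)
shutil.copytree(src, dst, dirs_exist_ok=True)

print("OBB in place:", (dst / "main.1.com.krafton.newstate.obb").exists())
```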

      Conclusion

      -

      New State Mobile is a new battle royale game that offers a futuristic twist to the genre. It is developed by Krafton, the makers of PUBG, and features realistic graphics, dynamic gunplay, diverse vehicles, and customizable weapons. It also introduces some new elements, such as drones, combat rolls, ballistic shields, deployable bunkers, fire support systems, and more.

      -

      If you want to play New State Mobile on your Android device, you can download it from Google Play Store or from third-party sources. However, you should be aware of the benefits and risks of downloading New State Mobile APK and OBB files from different sources. You should also follow the steps carefully to ensure that you have a smooth gaming experience.

      -

      We hope that this article has helped you learn how to download New State Mobile APK and OBB files on your Android device. If you have any questions or feedback, please feel free to leave a comment below. Happy gaming!

      -

      FAQs

      -
• Q: What is the difference between APK and OBB files?
  A: APK stands for Android Package Kit, which is the file format used to distribute and install applications on Android devices. OBB stands for Opaque Binary Blob, which is a file format used to store large amounts of data that are not required to run the application. APK and OBB files work together to run an application or a game on an Android device.
• Q: Is New State Mobile free to play?
  A: Yes, New State Mobile is free to play. However, it may contain in-app purchases or ads that require real money.
• Q: Is New State Mobile compatible with iOS devices?
  A: No, New State Mobile is currently only compatible with Android devices. The developers have not announced any plans to release the game for iOS devices yet.
• Q: How can I update New State Mobile on my device?
  A: If you downloaded New State Mobile from Google Play Store, you can update it automatically or manually through Google Play Store. If you downloaded New State Mobile from third-party sources, you will have to download and install the latest version of the APK and OBB files from the same source.
• Q: How can I contact the developers of New State Mobile?
  A: You can contact the developers of New State Mobile through their official website or their social media accounts. You can also send them an email at newstate@newstate.com.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Join the Frozen Fun with Online Games - Featuring Elsa Anna Olaf and More!.md b/spaces/congsaPfin/Manga-OCR/logs/Join the Frozen Fun with Online Games - Featuring Elsa Anna Olaf and More!.md deleted file mode 100644 index d008f02ebe36b4c329e776db1e7ab38ddf880d4d..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Join the Frozen Fun with Online Games - Featuring Elsa Anna Olaf and More!.md +++ /dev/null @@ -1,102 +0,0 @@ - -

      Frozen Game Online: How to Have Fun with Your Favorite Disney Characters

      -

      Do you love the Disney movie Frozen and its sequel Frozen 2? Do you want to join Anna, Elsa, Olaf, Kristoff, and Sven on their magical adventures in the kingdom of Arendelle? If you answered yes, then you will be happy to know that you can play Frozen games online for free on your computer, tablet, or smartphone. In this article, we will tell you everything you need to know about Frozen games online, including what they are, why you should play them, and how to play them. Let's get started!

      -

      What are Frozen games?

      -

      Frozen games are online games that are inspired by the Frozen movies and feature the characters, settings, and themes from the story. You can find a variety of Frozen games online, such as dress up and makeover games, adventure and puzzle games, cooking and baking games, coloring and painting games, and more. These games are designed to entertain and educate you, as well as to let you express your creativity and imagination. You can play Frozen games online alone or with your friends and family, and have a lot of fun with your favorite Disney characters.

      -

      frozen game online


      Download Filehttps://urlca.com/2uO6Nl



      -

      The Frozen story and the main characters

      -

      The Frozen story is about two sisters, Anna and Elsa, who live in the kingdom of Arendelle. Elsa has the power to create ice and snow, but she struggles to control it and fears hurting her sister. When Elsa accidentally unleashes an eternal winter on the kingdom, she runs away to the mountains. Anna sets out to find her sister and bring her back, along with her friends Olaf, a snowman who loves summer; Kristoff, an ice harvester; and Sven, his loyal reindeer. Along the way, they face many dangers and discover the true meaning of love.

      -

      In Frozen 2, Anna and Elsa embark on a new adventure to discover the origin of Elsa's powers and save Arendelle from a mysterious threat. They travel to an enchanted forest where they meet new allies and enemies, such as the Northuldra people, the elemental spirits of fire, water, earth, and air, and a mysterious voice that calls to Elsa. They also learn more about their parents' past and their own destiny.

      -

      The types of Frozen games you can play online

      -

      There are many types of Frozen games you can play online, depending on your interests and preferences. Here are some examples:

      -

      Dress up and makeover games

      -

      If you love fashion and beauty, you can play dress up and makeover games with Anna and Elsa. You can choose from different outfits, hairstyles, accessories, makeup, and more to create your own look for the princesses. You can also mix and match different styles and colors to create unique combinations. You can also dress up other characters from the Frozen movies, such as Olaf, Kristoff, Sven, or even Hans.

      -

      Adventure and puzzle games

      -

      If you love action and challenges, you can play adventure and puzzle games with Anna and Elsa. You can join them on their quests to save Arendelle from different threats, such as snow monsters, evil princes, or dark magic. You can also help them solve puzzles and riddles along the way. You can also play adventure and puzzle games with other characters from the Frozen movies, such as Olaf, Kristoff, Sven, or even Marshmallow.

      -

      Cooking and baking games

      -

If you love cooking and baking, you can play cooking and baking games with Anna and Elsa. You can learn how to make delicious dishes and desserts, such as chocolate cake, ice cream, pizza, soup, sandwiches, and more. You can also decorate your creations with different toppings, frosting, sprinkles, and more. You can also play cooking and baking games with other characters from the Frozen movies, such as Olaf, Kristoff, Sven, or even Oaken.

      -

      Coloring and painting games

      -

      If you love coloring and painting, you can play coloring and painting games with Anna and Elsa. You can choose from different images of the Frozen characters and scenes, and use different colors and tools to fill them in. You can also draw your own pictures and add stickers, stamps, glitter, and more. You can also play coloring and painting games with other characters from the Frozen movies, such as Olaf, Kristoff, Sven, or even Bruni.

      -

      Why should you play Frozen games online?

      -

      There are many reasons why you should play Frozen games online. Here are some of them:

      -

      -

      They are free and easy to access

      -

      You don't need to pay anything or download anything to play Frozen games online. All you need is a device that can connect to the internet and a browser that supports Flash or HTML5. You can play Frozen games online anytime and anywhere you want, as long as you have a stable internet connection.

      -

      They are fun and educational

      -

      You can have a lot of fun playing Frozen games online, as they offer a variety of gameplay options and challenges. You can also learn new skills and knowledge while playing Frozen games online, such as vocabulary, spelling, math, logic, memory, creativity, and more. You can also improve your hand-eye coordination, reaction time, problem-solving, and decision-making skills while playing Frozen games online.

      -

      They are suitable for all ages and preferences

      -

      You can find Frozen games online that are suitable for all ages and preferences. Whether you are a kid or an adult, a boy or a girl, a fan or a newcomer of the Frozen movies, you can find Frozen games online that match your interests and tastes. You can also choose from different levels of difficulty and complexity while playing Frozen games online.

      -

      How to play Frozen games online?

      -

      Playing Frozen games online is very easy and simple. Here are the steps you need to follow:

      -

      Choose a reliable website that offers Frozen games

      -

      The first step is to choose a reliable website that offers Frozen games online. There are many websites that claim to offer Frozen games online, but not all of them are safe and trustworthy. Some of them may contain viruses, malware, pop-ups, ads, or inappropriate content that may harm your device or your privacy. To avoid these risks, you should choose a website that has a good reputation, positive reviews, and high ratings from other users. You should also check the website's terms of service, privacy policy, and contact information before playing any game.

      -

      Browse through the categories and select a game you like

      -

      The next step is to browse through the categories and select a game you like. You can find different categories of Frozen games online on the website's homepage or menu bar. Some of the common categories are dress up and makeover games, adventure and puzzle games, cooking and baking games, coloring and painting games, and more. You can also use the search bar or the filters to find a specific game you are looking for. Once you find a game you like, click on it to open it in a new tab or window.

      -

      Follow the instructions and enjoy the game

      -

      The last step is to follow the instructions and enjoy the game. Most of the Frozen games online have simple and clear instructions that you can read or listen to before or during the game. You can also see the controls and the objectives of the game on the screen. You can use your mouse, keyboard, touchpad, or touchscreen to play the game, depending on your device. You can also pause, resume, restart, or quit the game at any time. You can also share your results, feedback, or suggestions with other players or the website.

      -

      Conclusion

      -

      Frozen games online are a great way to have fun with your favorite Disney characters. You can play a variety of Frozen games online for free, such as dress up and makeover games, adventure and puzzle games, cooking and baking games, coloring and painting games, and more. You can also learn new skills and knowledge while playing Frozen games online, such as vocabulary, spelling, math, logic, memory, creativity, and more. You can also play Frozen games online with your friends and family, and have a lot of fun together.

      -

      If you want to play Frozen games online, all you need to do is choose a reliable website that offers Frozen games, browse through the categories and select a game you like, follow the instructions and enjoy the game. It's that easy and simple!

      -

      So what are you waiting for? Go ahead and play Frozen games online now and have a blast with Anna, Elsa, Olaf, Kristoff, Sven, and the rest of the Frozen gang!

      -

      FAQs

      -

      Here are some frequently asked questions about Frozen games online:

      -

      Q: Are Frozen games online safe for kids?

      -

      A: Yes, Frozen games online are safe for kids, as long as they play them on a reliable website that does not contain any viruses, malware, pop-ups, ads, or inappropriate content. You should also supervise your kids while they play Frozen games online and limit their screen time.

      -

      Q: Can I play Frozen games online without internet connection?

      -

      A: No, you cannot play Frozen games online without internet connection. You need to have a stable internet connection to access and play Frozen games online. However, some websites may allow you to download or save some Frozen games offline so that you can play them later without internet connection.

      -

      Q: Can I play Frozen games online on any device?

      -

      A: Yes, you can play Frozen games online on any device that can connect to the internet and has a browser that supports Flash or HTML5. You can play Frozen games online on your computer, tablet, or smartphone. However, some Frozen games online may have different features or quality depending on the device you use. You should also make sure that your device has enough storage space and battery life to play Frozen games online.

      -

      Q: Can I play Frozen games online with other players?

      -

      A: Yes, you can play Frozen games online with other players, depending on the game you choose. Some Frozen games online are multiplayer games, which means that you can play them with other players from around the world. You can also play Frozen games online with your friends and family, by sharing the same device or using different devices. You can also chat, compete, or cooperate with other players while playing Frozen games online.

      -

      Q: Can I create my own Frozen games online?

      -

      A: Yes, you can create your own Frozen games online, if you have the skills and tools to do so. You can use different software, platforms, or websites to create your own Frozen games online, such as Scratch, Unity, GameMaker, or Disney Create. You can also share your own Frozen games online with other players and get feedback and ratings from them.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/K-Lite Codec Pack The Ultimate Solution for Windows 10 64-bit Media Playback.md b/spaces/congsaPfin/Manga-OCR/logs/K-Lite Codec Pack The Ultimate Solution for Windows 10 64-bit Media Playback.md deleted file mode 100644 index 3b6f1c28a633787565943a6af97cec1eb77842a0..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/K-Lite Codec Pack The Ultimate Solution for Windows 10 64-bit Media Playback.md +++ /dev/null @@ -1,103 +0,0 @@ - -

      K-Lite Codec Pack 64-bit Windows 10 Free Download: Everything You Need to Know

      -

      If you are looking for a way to play all kinds of audio and video files on your Windows 10 computer, you might have heard of K-Lite Codec Pack. But what is it exactly and how can you download and install it for free? In this article, we will answer these questions and more. We will also show you how to use K-Lite Codec Pack 64-bit Windows 10 to enjoy your media files without any hassle.

      -




      -

      What is K-Lite Codec Pack?

      -

      Before we explain what K-Lite Codec Pack is, let's first understand what codecs are and why you need them. Codecs are software components that encode and decode (compress and decompress) audio and video data. They are essential for playing different formats of media files on your computer.

      -

      However, not all codecs are installed by default on your Windows system. Some codecs are proprietary or licensed by certain companies or organizations. This means that you might not be able to play some media files unless you have the right codec installed.

      -

      This is where K-Lite Codec Pack comes in handy. It is a collection of codecs and related tools that can help you play almost any audio and video file on your computer. It also includes some media players, such as Media Player Classic Home Cinema (MPC-HC), that can enhance your viewing experience.

      -

      Some of the features and benefits of K-Lite Codec Pack are:

      -
        -
      • It supports a wide range of audio and video formats, such as MP4, MKV, AVI, FLV, MP3, AAC, FLAC, and more.
      • -
      • It is compatible with Windows 10 and other versions of Windows, both 32-bit and 64-bit.
      • -
      • It is easy to install and update, with various options to suit your preferences and needs.
      • -
      • It is free to use and does not contain any ads, spyware, or malware.
      • -
      • It improves the performance and quality of your media playback, with features such as subtitles, filters, thumbnails, and more.
      • -
      -

      How to Download and Install K-Lite Codec Pack 64-bit Windows 10 for Free?

      -

Now that you know what K-Lite Codec Pack is and what it can do for you, you might be wondering how to get it on your computer. Here are the steps to follow, with a small sketch for double-checking your download right after the list:

      -
        -
      1. Go to the official website of K-Lite Codec Pack at https://codecguide.com/download_kl.htm. This is the safest and most reliable source to download the software.
      2. -
      3. Choose the version that you want to download. There are four main versions: Basic, Standard, Full, and Mega. Each version has different features and components. You can compare them on the website or read more about them in the FAQs section below.
      4. -
      5. Click on the download link for the version that you want. You will be redirected to a mirror site where you can download the file. Make sure that you download the 64-bit version if you have a 64-bit Windows system.
      6. -
      7. Save the file on your computer and run it. You will see a setup wizard that will guide you through the installation process.
      8. -
      9. Follow the instructions on the screen and choose the options that suit your needs. You can customize the installation by selecting or deselecting the components that you want or don't want. You can also change the default settings if you wish.
      10. -
      11. Finish the installation and restart your computer if prompted. You are now ready to use K-Lite Codec Pack 64-bit Windows 10.
      12. -
      -
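Before you run the file you saved, you can also compare it against a checksum if the download page or mirror publishes one. The following is a minimal Python sketch of that check; the installer file name and the expected hash are placeholders you would replace with your own values, and it simply starts the normal setup wizard when the hash matches.

import hashlib
import subprocess
from pathlib import Path

# Placeholders: adjust the file name to whatever you downloaded and paste
# the checksum shown on the download page, if one is provided there.
installer = Path.home() / "Downloads" / "K-Lite_Codec_Pack_Standard.exe"
expected_sha256 = "paste-the-published-sha256-here"

digest = hashlib.sha256(installer.read_bytes()).hexdigest()
if digest.lower() == expected_sha256.lower():
    subprocess.run([str(installer)], check=True)   # launches the setup wizard
else:
    print("Checksum mismatch, do not run this file:", digest)

If no checksum is published, sticking to the official site and scanning the file with your antivirus, as mentioned later in the FAQs, is the next best option.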

      How to Use K-Lite Codec Pack 64-bit Windows 10?

      -

      Once you have installed K-Lite Codec Pack 64-bit Windows 10, you can start playing your audio and video files with ease. Here are some tips on how to use it:

      -
        -
      • To play a media file, you can simply double-click on it or right-click on it and choose "Open with". You will see a list of media players that are compatible with K-Lite Codec Pack, such as MPC-HC, VLC, or Windows Media Player. Choose the one that you prefer and enjoy your media playback.
      • -
      • To update K-Lite Codec Pack, you can either download the latest version from the website or use the update tool that comes with the software. The update tool will check for new updates automatically or manually and install them for you.
      • -
      • To configure K-Lite Codec Pack, you can use the settings application that comes with the software. The settings application will allow you to change various options and preferences for your codecs and media players. You can also access some advanced tools and features, such as codec tweak tool, media info tool, or graph studio tool.
      • -
      • To troubleshoot common issues with K-Lite Codec Pack, you can use the help file that comes with the software. The help file will provide you with useful information and solutions for some of the most common problems that users encounter with K-Lite Codec Pack. You can also visit the official forum or contact the support team if you need further assistance.
      • -
      -

      Conclusion

      -

K-Lite Codec Pack 64-bit Windows 10 is a great piece of software that can help you play all kinds of audio and video files on your computer. It is free, easy to use, and compatible with Windows 10 and other versions of Windows. It also offers many features and benefits that can enhance your media playback experience. If you are looking for a reliable and comprehensive codec pack for your Windows system, you should definitely give K-Lite Codec Pack a try.

      -

      If you found this article helpful, please share it with your friends and family who might also be interested in K-Lite Codec Pack 64-bit Windows 10 free download. And if you have any questions or feedback about K-Lite Codec Pack, feel free to leave a comment below. We would love to hear from you!

      -

      FAQs

      -

      Q1: Is K-Lite Codec Pack safe to use?

      -

      A1: Yes, K-Lite Codec Pack is safe to use, as long as you download it from the official website or a trusted mirror site. It does not contain any harmful or malicious software, such as viruses, trojans, spyware, or adware. However, you should always be careful when downloading and installing any software from the internet, and scan your files with a reputable antivirus program before running them.

      -


      -

      Q2: What is the difference between K-Lite Codec Pack Basic, Standard, Full, Mega, and Update?

      -

      A2: The difference between the versions of K-Lite Codec Pack is mainly the number and type of components that they include. Here is a brief overview of each version:

      -
        -
      • K-Lite Codec Pack Basic: This is the smallest and simplest version of K-Lite Codec Pack. It only includes the most essential codecs and tools that you need to play common audio and video formats. It is suitable for users who want a minimalistic and lightweight codec pack.
      • -
      • K-Lite Codec Pack Standard: This is the recommended version of K-Lite Codec Pack for most users. It includes all the components of the Basic version, plus some additional codecs and tools that can improve your media playback quality and compatibility. It also includes Media Player Classic Home Cinema (MPC-HC), a versatile and powerful media player.
      • -
      • K-Lite Codec Pack Full: This is the most complete version of K-Lite Codec Pack for advanced users. It includes all the components of the Standard version, plus some extra codecs and tools that can give you more control and flexibility over your media playback. It also includes some rare and experimental codecs that can play some uncommon audio and video formats.
      • -
      • K-Lite Codec Pack Mega: This is the ultimate version of K-Lite Codec Pack for enthusiasts and experts. It includes all the components of the Full version, plus some additional codecs and tools that are not part of the regular K-Lite Codec Pack. It also includes some other software, such as VLC Media Player, MediaInfo, and Icaros.
      • -
      • K-Lite Codec Pack Update: This is not a standalone version of K-Lite Codec Pack, but an update package that can be used to update an existing installation of K-Lite Codec Pack to the latest version. It only includes the updated components and does not require a full reinstallation.
      • -
      -

      Q3: Does K-Lite Codec Pack work with Windows Media Player?

      -

      A3: Yes, K-Lite Codec Pack works with Windows Media Player (WMP), as well as other media players that use DirectShow filters. However, some formats may not be supported by WMP due to its limitations or restrictions. For example, WMP does not support subtitles or DVD playback by default. In such cases, you may need to use another media player that comes with K-Lite Codec Pack, such as MPC-HC or VLC.

      -

      Q4: How can I uninstall K-Lite Codec Pack from my computer?

      -

      A4: To uninstall K-Lite Codec Pack from your computer, you can use the uninstaller that comes with the software. You can find it in the Start menu under "K-Lite Codec Pack" or in the Control Panel under "Programs and Features". You can also run the installer again and choose the option to uninstall. Follow the instructions on the screen and remove all the components that you want to uninstall.

      -

      Q5: Where can I get more information and support for K-Lite Codec Pack?

      -

      A5: If you want to learn more about K-Lite Codec Pack or need help with using it, you can visit the official website at https://codecguide.com/. There you can find more details about each component, download links, guides, tips, FAQs, forums, contact information, and more.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Red Ball 7 Mod APK A Fun and Challenging Ride Through the Dark World.md b/spaces/congsaPfin/Manga-OCR/logs/Red Ball 7 Mod APK A Fun and Challenging Ride Through the Dark World.md deleted file mode 100644 index 765078b15e43b491fbf589a60e2e81f513268929..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Red Ball 7 Mod APK A Fun and Challenging Ride Through the Dark World.md +++ /dev/null @@ -1,115 +0,0 @@ -
      -

      Red Ball 7 Mod APK: A Fun and Adventure Arcade Game

      -

      If you are looking for a fun and adventure arcade game that will keep you entertained for hours, you should try Red Ball 7 mod APK. This is a modified version of the original Red Ball 7 game that allows you to access all the premium features for free. In this article, we will tell you everything you need to know about Red Ball 7 mod APK, including what it is, how to download and install it, what are the alternatives, and some FAQs.

      -




      -

      What is Red Ball 7?

      -

Red Ball 7 is a free arcade game. It is a fun adventure game that challenges you to ride the red ball through a dark world and avoid obstacles along the way. You catch stars to boost your score and keep the ball moving fast, and you need to mind both the stars and the obstacles on the road, because touching one of the monsters there ends your run!

      -

      The gameplay of Red Ball 7

      -

The gameplay of Red Ball 7 is simple but addictive. You control the red ball with your finger or mouse and make it jump, dodge, and run until you reach the finish line of every level. There are six level packs with 106 levels in total, each with different challenges and difficulties. You have to collect as many stars as possible along the way, as they increase your score and unlock new balls. You also have to avoid falling into pits, spikes, traps, and enemies, as they will make you lose a life. You have three lives in each level, so be careful!

      -

      The features of Red Ball 7

      -

      Red Ball 7 has many features that make it a fun and adventure arcade game. Some of them are:

      -
        -
      • Clean and colorful graphics that create a dark and mysterious atmosphere.
      • -
      • Phone and tablet support that allows you to play on any device.
      • -
      • Smooth and responsive controls that make it easy to play.
      • -
      • Fun and catchy music and sound effects that enhance the gaming experience.
      • -
      • Various balls with different abilities that you can unlock by collecting stars.
      • -
      • Leaderboards and achievements that you can share with your friends.
      • -
      -

      What is a mod APK?

      -

      A mod APK is a modified version of an original Android application package (APK) file that has been altered by someone other than the developer. A mod APK usually offers some advantages over the original app, such as unlocking premium features, removing ads, adding cheats, or enhancing performance.

      -

      The benefits of installing mod APK files

      -

      Installing mod APK files can have some benefits for Android users who want to enjoy more features and options in their apps and games. Some of these benefits are:

      -


      -
        -
      • Access all the features of an application without paying anything or waiting for levels.
      • -
      • Enjoy premium features for free of cost, such as unlimited coins, gems, lives, or resources.
      • -
      • Easy to download and install from several trusted sites that offer mod APK files.
      • -
      • Available on every region and mode, regardless of the restrictions imposed by the original app.
      • -
      • Updated with the latest versions and features of the original app.
      • -
      • Great hacks that can make the game easier or more fun.
      • -
      • Allows access to in-app purchases without spending real money.
      • -
      -

      The risks of installing mod APK files

      -

Installing mod APK files also carries some risks for Android users who care about their security and privacy. Some of these risks are:

      -
        -
      • Potential malware or viruses that can harm your device or steal your data.
      • -
      • Illegal or unethical activities that can violate the terms and conditions of the original app or the law.
      • -
      • Compatibility issues that can cause crashes, glitches, or errors in the app or the device.
      • -
      • Lack of support or updates from the original developer or the modder.
      • -
      • Bans or penalties from the original app or the game server if detected.
      • -
      • Loss of warranty or guarantee from the device manufacturer or the app developer.
      • -
      -

      How to download and install Red Ball 7 mod APK?

      -

If you want to download and install Red Ball 7 mod APK on your Android device, you have to follow these steps; an optional computer-based sideloading sketch comes right after the list:

      -

      The steps to download and install Red Ball 7 mod APK

      -
        -
1. Go to a trusted site that offers the Red Ball 7 mod APK file, such as [APKPure] or [APKDone].
      2. -
      3. Click on the download button and wait for the file to be downloaded on your device.
      4. -
      5. Go to your device settings and enable the installation of apps from unknown sources.
      6. -
      7. Locate the downloaded file in your file manager and tap on it to start the installation process.
      8. -
      9. Follow the instructions on the screen and wait for the installation to be completed.
      10. -
      11. Launch the app and enjoy playing Red Ball 7 mod APK with all the premium features unlocked.
      12. -
      -
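If you would rather install from a computer than tap the file on the phone, the Android platform tools can sideload the APK over USB. The sketch below is a rough Python wrapper around adb; it assumes adb is installed and on your PATH, USB debugging is enabled on the phone, and the APK file name is only a placeholder.

import subprocess

apk = "red-ball-7-mod.apk"   # placeholder file name

# List connected devices; the phone must appear as "device", not "unauthorized".
subprocess.run(["adb", "devices"], check=True)

# Install the APK; -r replaces an existing install while keeping its data.
subprocess.run(["adb", "install", "-r", apk], check=True)

Either way, the cautions about unknown sources from the steps above still apply.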

      The screenshots of Red Ball 7 mod APK

      -

Here are some screenshots of Red Ball 7 mod APK that show what it looks like and what it offers:

-

[Screenshots 1-4 of Red Ball 7 mod APK]
      -

      What are the alternatives to Red Ball 7 mod APK?

      -

      If you are looking for some other arcade games that you can try instead of or along with Red Ball 7 mod APK, here are some suggestions:

      -

      Some other arcade games that you can try

      -
        -
• [Red Ball 4]: This is another installment in the Red Ball series, where you have to save the world from the evil black squares. It has more levels, more enemies, more obstacles, and more fun.
      • -
      • [Fireboy and Watergirl]: This is a co-op game where you have to control two characters with different abilities and help them escape from various temples. It has puzzles, traps, platforms, and teamwork.
      • -
      • [Geometry Dash]: This is a rhythm-based game where you have to jump, fly, and flip through dangerous passages and spiky obstacles. It has music, levels, achievements, and customizations.
      • -
      • [Angry Birds]: This is a classic game where you have to slingshot birds at pigs and destroy their structures. It has physics, humor, characters, and power-ups.
      • -
      -

      FAQs about Red Ball 7 mod APK

      -

      Here are some frequently asked questions about Red Ball 7 mod APK that you might have:

      -
        -
      1. Is Red Ball 7 mod APK safe to download and install?
      2. -

        Red Ball 7 mod APK is generally safe to download and install if you get it from a trusted site that scans the file for malware and viruses. However, you should always be careful when installing apps from unknown sources and check the permissions they require. You should also backup your data before installing any mod APK file.

        -
      3. Is Red Ball 7 mod APK legal to use?
      4. -

        Red Ball 7 mod APK is not legal to use as it violates the intellectual property rights of the original developer. It also breaches the terms and conditions of the original app and may result in bans or penalties if detected. Therefore, you should use Red Ball 7 mod APK at your own risk and responsibility.

        -
      5. Does Red Ball 7 mod APK require root access?
      6. -

        No, Red Ball 7 mod APK does not require root access to work on your Android device. You just need to enable the installation of apps from unknown sources in your device settings and follow the steps mentioned above.

        -
      7. Can I play Red Ball 7 mod APK online or offline?
      8. -

        You can play Red Ball 7 mod APK both online and offline. However, if you want to access the leaderboards and achievements, you need to have an internet connection. You can also play with your friends online by sharing your scores and challenges.

        -
      9. Can I update Red Ball 7 mod APK to the latest version?
      10. -

        Yes, you can update Red Ball 7 mod APK to the latest version if you download it from a site that offers regular updates. However, you may lose some of the mod features or face compatibility issues if you update the app. Therefore, you should always backup your data before updating any mod APK file.

        -
      -

      Conclusion

      -

      Red Ball 7 mod APK is a fun and adventure arcade game that allows you to enjoy all the premium features of the original game for free. You can download and install it easily from a trusted site and play it on your Android device. However, you should also be aware of the risks and legal issues involved in using mod APK files and use them at your own discretion. We hope this article has helped you learn more about Red Ball 7 mod APK and how to use it. If you have any questions or feedback, please feel free to leave a comment below.

      -
      -
      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/The Ultimate Guide to WhatsApp Business Clone APK 2022 Features Benefits and Risks.md b/spaces/congsaPfin/Manga-OCR/logs/The Ultimate Guide to WhatsApp Business Clone APK 2022 Features Benefits and Risks.md deleted file mode 100644 index 251c79e12225d6f816596c2ec316a24183f9c42b..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/The Ultimate Guide to WhatsApp Business Clone APK 2022 Features Benefits and Risks.md +++ /dev/null @@ -1,91 +0,0 @@ -
      -

      WhatsApp Business Clone APK 2022: What You Need to Know

      -

      WhatsApp is one of the most popular messaging apps in the world, with over 2 billion users. It allows you to send text messages, voice messages, photos, videos, documents, and more to your contacts. But did you know that there is also a version of WhatsApp designed for businesses? And did you know that there is a way to use multiple WhatsApp accounts on one device? In this article, we will tell you everything you need to know about WhatsApp Business and WhatsApp Business Clone APK in 2022.

      -




      -

      What is WhatsApp Business?

      -

      WhatsApp Business is a separate app from WhatsApp that is designed to help small and medium businesses communicate with their customers and clients. It was launched in 2018 and is available for Android and iOS devices. You can use it to create a business profile, showcase your products and services, send automated messages, and more.

      -

      Features of WhatsApp Business

      -

      Some of the features that WhatsApp Business offers are:

      -
        -
      • Business profile: You can create a professional profile for your business that includes your name, logo, address, website, email, and description.
      • -
      • Catalog: You can showcase your products and services in a catalog that your customers can browse and order from.
      • -
      • Labels: You can organize your chats and contacts with labels such as new customer, pending payment, order complete, etc.
      • -
      • Quick replies: You can save and reuse frequently sent messages such as greetings, thank you notes, FAQs, etc.
      • -
      • Automated messages: You can set up messages that are sent automatically when you are away, busy, or have a specific event.
      • -
      • Statistics: You can view metrics such as how many messages were sent, delivered, read, and received by your customers.
      • -
      -

      Benefits of WhatsApp Business

      -

      Some of the benefits that WhatsApp Business can bring to your business are:

      -
        -
      • Customer engagement: You can connect with your customers in a personal and convenient way, and build trust and loyalty.
      • -
      • Sales growth: You can increase your sales by showcasing your products and services, sending offers and promotions, and accepting orders and payments.
      • -
      • Customer support: You can provide fast and efficient customer service by answering queries, resolving issues, and sending feedback.
      • -
      • Brand awareness: You can increase your brand visibility and reach by creating a professional profile and sharing it with your customers and prospects.
      • -
      -

      What is WhatsApp Business Clone APK?

      -

      WhatsApp Business Clone APK is a modified version of WhatsApp Business that allows you to use multiple WhatsApp accounts on one device. It is not an official app from WhatsApp, but rather a third-party app that is created by developers who modify the original app. It is also known as GBWhatsApp Business or FMWhatsApp Business.

      -

      How to Download and Install WhatsApp Business Clone APK

      -

      To download and install WhatsApp Business Clone APK on your device, you need to follow these steps:

      -


      -
        -
      1. Backup your data: Before installing any modded app, it is always recommended to backup your data from the original app. You can do this by going to Settings > Chats > Chat backup in WhatsApp or WhatsApp Business.
      2. -
      3. Uninstall the original app: To avoid any conflicts or errors, you need to uninstall the original app from your device. You can do this by going to Settings > Apps > WhatsApp or WhatsApp Business > Uninstall.
      4. -
      5. Download the modded app: You can download the modded app from any reliable source online. For example, you can download GBWhatsApp Business from [here](^

        Risks of Using WhatsApp Business Clone APK

        -

While using WhatsApp Business Clone APK may seem tempting, it also comes with some risks that you should be aware of. Some of the risks are listed below, followed by a quick way to check who signed an APK:

        -
          -
        • Malware infection: Since WhatsApp Business Clone APK is not an official app from WhatsApp, it may contain malicious code that can harm your device or steal your data. You may also expose your device to viruses, spyware, ransomware, and other threats by downloading the app from untrusted sources.
        • -
        • Account suspension: WhatsApp has a strict policy against using modded or cloned apps that violate its terms of service. If WhatsApp detects that you are using WhatsApp Business Clone APK, it may temporarily or permanently ban your account from using the service. You may also lose access to your chats, contacts, and backups.
        • -
        • Data leakage: WhatsApp Business Clone APK may not offer the same level of security and privacy as the original app. It may not encrypt your messages or backups, or it may share your data with third parties without your consent. You may also compromise your business data and reputation by using an unauthorized app.
        • -
        -
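One practical way to spot a repackaged app is to look at who signed the APK. The sketch below shells out to apksigner, a tool that ships with the Android SDK build-tools; the build-tools path and the APK file name are assumptions you would adjust, and the certificate digest it prints can then be compared with the signer of the official WhatsApp Business app.

import subprocess

# Both values are placeholders; adjust them to your own setup.
apksigner = r"C:\Android\Sdk\build-tools\34.0.0\apksigner.bat"
apk = "whatsapp-business-clone.apk"

# Print the signing certificate(s) of the APK. A clone repackaged by a
# third party will show a different signer than the official app.
result = subprocess.run(
    [apksigner, "verify", "--print-certs", apk],
    capture_output=True,
    text=True,
)
print(result.stdout or result.stderr)

A mismatch does not prove the file is malicious, but it does confirm the package was not signed by the original developer.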

        Conclusion

        -

        WhatsApp Business is a great app for small and medium businesses that want to communicate with their customers and clients in a convenient and professional way. It offers many features and benefits that can help you grow your business and provide better customer service. However, if you want to use multiple WhatsApp accounts on one device, you should be careful about using WhatsApp Business Clone APK. This is a modded app that is not authorized by WhatsApp and may pose some security and privacy risks. You may also face account suspension or data loss if you use it. Therefore, it is advisable to use only official apps from WhatsApp and avoid any clones or mods.

        -

        FAQs

        -

        Here are some frequently asked questions about WhatsApp Business and WhatsApp Business Clone APK:

        -
          -
        1. Can I use both WhatsApp and WhatsApp Business on the same device?
          Yes, you can use both apps on the same device with different phone numbers. However, you cannot use the same phone number for both apps.
        2. -
        3. Can I use WhatsApp Business on multiple devices?
          No, you can only use WhatsApp Business on one device at a time. If you try to log in to another device, you will be logged out from the previous one.
        4. -
        5. Can I backup my WhatsApp Business chats to Google Drive or iCloud?
          Yes, you can backup your chats to Google Drive on Android devices or iCloud on iOS devices. However, these backups are not encrypted and may be accessed by Google or Apple.
        6. -
        7. Can I restore my WhatsApp chats to WhatsApp Business or vice versa?
          No, you cannot restore your chats from one app to another. The chat history is not compatible between the two apps.
        8. -
        9. Can I use WhatsApp Web or Desktop with WhatsApp Business?
          Yes, you can use WhatsApp Web or Desktop with WhatsApp Business by scanning the QR code on your computer screen with your phone.
        10. -

        -
        -
        \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/evaluation/evaluator.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/evaluation/evaluator.py deleted file mode 100644 index 2c85d90eaa5236773a901a68c43c28d42bfd47ec..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/evaluation/evaluator.py +++ /dev/null @@ -1,228 +0,0 @@ -# ------------------------------------------------------------------------------ -# Reference: https://github.com/facebookresearch/detectron2/blob/main/detectron2/evaluation/evaluator.py -# Modified by Jitesh Jain (https://github.com/praeclarumjj3) -# ------------------------------------------------------------------------------ - -import datetime -import logging -import time -from collections import OrderedDict, abc -from contextlib import ExitStack, contextmanager -from typing import List, Union -import torch -from torch import nn - -from annotator.oneformer.detectron2.utils.comm import get_world_size, is_main_process -from annotator.oneformer.detectron2.utils.logger import log_every_n_seconds - - -class DatasetEvaluator: - """ - Base class for a dataset evaluator. - - The function :func:`inference_on_dataset` runs the model over - all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs. - - This class will accumulate information of the inputs/outputs (by :meth:`process`), - and produce evaluation results in the end (by :meth:`evaluate`). - """ - - def reset(self): - """ - Preparation for a new round of evaluation. - Should be called before starting a round of evaluation. - """ - pass - - def process(self, inputs, outputs): - """ - Process the pair of inputs and outputs. - If they contain batches, the pairs can be consumed one-by-one using `zip`: - - .. code-block:: python - - for input_, output in zip(inputs, outputs): - # do evaluation on single input/output pair - ... - - Args: - inputs (list): the inputs that's used to call the model. - outputs (list): the return value of `model(inputs)` - """ - pass - - def evaluate(self): - """ - Evaluate/summarize the performance, after processing all input/output pairs. - - Returns: - dict: - A new evaluator class can return a dict of arbitrary format - as long as the user can process the results. - In our train_net.py, we expect the following format: - - * key: the name of the task (e.g., bbox) - * value: a dict of {metric name: score}, e.g.: {"AP50": 80} - """ - pass - - -class DatasetEvaluators(DatasetEvaluator): - """ - Wrapper class to combine multiple :class:`DatasetEvaluator` instances. - - This class dispatches every evaluation call to - all of its :class:`DatasetEvaluator`. - """ - - def __init__(self, evaluators): - """ - Args: - evaluators (list): the evaluators to combine. 
- """ - super().__init__() - self._evaluators = evaluators - - def reset(self): - for evaluator in self._evaluators: - evaluator.reset() - - def process(self, inputs, outputs): - for evaluator in self._evaluators: - evaluator.process(inputs, outputs) - - def evaluate(self): - results = OrderedDict() - for evaluator in self._evaluators: - result = evaluator.evaluate() - if is_main_process() and result is not None: - for k, v in result.items(): - assert ( - k not in results - ), "Different evaluators produce results with the same key {}".format(k) - results[k] = v - return results - - -def inference_on_dataset( - model, data_loader, evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None] -): - """ - Run model on the data_loader and evaluate the metrics with evaluator. - Also benchmark the inference speed of `model.__call__` accurately. - The model will be used in eval mode. - - Args: - model (callable): a callable which takes an object from - `data_loader` and returns some outputs. - - If it's an nn.Module, it will be temporarily set to `eval` mode. - If you wish to evaluate a model in `training` mode instead, you can - wrap the given model and override its behavior of `.eval()` and `.train()`. - data_loader: an iterable object with a length. - The elements it generates will be the inputs to the model. - evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark, - but don't want to do any evaluation. - - Returns: - The return value of `evaluator.evaluate()` - """ - num_devices = get_world_size() - logger = logging.getLogger(__name__) - logger.info("Start inference on {} batches".format(len(data_loader))) - - total = len(data_loader) # inference data loader must have a fixed length - if evaluator is None: - # create a no-op evaluator - evaluator = DatasetEvaluators([]) - if isinstance(evaluator, abc.MutableSequence): - evaluator = DatasetEvaluators(evaluator) - evaluator.reset() - - num_warmup = min(5, total - 1) - start_time = time.perf_counter() - total_data_time = 0 - total_compute_time = 0 - total_eval_time = 0 - with ExitStack() as stack: - if isinstance(model, nn.Module): - stack.enter_context(inference_context(model)) - stack.enter_context(torch.no_grad()) - - start_data_time = time.perf_counter() - for idx, inputs in enumerate(data_loader): - total_data_time += time.perf_counter() - start_data_time - if idx == num_warmup: - start_time = time.perf_counter() - total_data_time = 0 - total_compute_time = 0 - total_eval_time = 0 - - start_compute_time = time.perf_counter() - outputs = model(inputs) - if torch.cuda.is_available(): - torch.cuda.synchronize() - total_compute_time += time.perf_counter() - start_compute_time - - start_eval_time = time.perf_counter() - evaluator.process(inputs, outputs) - total_eval_time += time.perf_counter() - start_eval_time - - iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup) - data_seconds_per_iter = total_data_time / iters_after_start - compute_seconds_per_iter = total_compute_time / iters_after_start - eval_seconds_per_iter = total_eval_time / iters_after_start - total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start - if idx >= num_warmup * 2 or compute_seconds_per_iter > 5: - eta = datetime.timedelta(seconds=int(total_seconds_per_iter * (total - idx - 1))) - log_every_n_seconds( - logging.INFO, - ( - f"Inference done {idx + 1}/{total}. " - f"Dataloading: {data_seconds_per_iter:.4f} s/iter. " - f"Inference: {compute_seconds_per_iter:.4f} s/iter. 
" - f"Eval: {eval_seconds_per_iter:.4f} s/iter. " - f"Total: {total_seconds_per_iter:.4f} s/iter. " - f"ETA={eta}" - ), - n=5, - ) - start_data_time = time.perf_counter() - - # Measure the time only for this worker (before the synchronization barrier) - total_time = time.perf_counter() - start_time - total_time_str = str(datetime.timedelta(seconds=total_time)) - # NOTE this format is parsed by grep - logger.info( - "Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format( - total_time_str, total_time / (total - num_warmup), num_devices - ) - ) - total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time))) - logger.info( - "Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format( - total_compute_time_str, total_compute_time / (total - num_warmup), num_devices - ) - ) - - results = evaluator.evaluate() - # An evaluator may return None when not in main process. - # Replace it by an empty dict instead to make it easier for downstream code to handle - if results is None: - results = {} - return results - - -@contextmanager -def inference_context(model): - """ - A context where the model is temporarily changed to eval mode, - and restored to previous mode afterwards. - - Args: - model: a torch Module - """ - training_mode = model.training - model.eval() - yield - model.train(training_mode) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/ops/psa_mask.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/ops/psa_mask.py deleted file mode 100644 index cdf14e62b50e8d4dd6856c94333c703bcc4c9ab6..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/ops/psa_mask.py +++ /dev/null @@ -1,92 +0,0 @@ -# Modified from https://github.com/hszhao/semseg/blob/master/lib/psa -from torch import nn -from torch.autograd import Function -from torch.nn.modules.utils import _pair - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', - ['psamask_forward', 'psamask_backward']) - - -class PSAMaskFunction(Function): - - @staticmethod - def symbolic(g, input, psa_type, mask_size): - return g.op( - 'mmcv::MMCVPSAMask', - input, - psa_type_i=psa_type, - mask_size_i=mask_size) - - @staticmethod - def forward(ctx, input, psa_type, mask_size): - ctx.psa_type = psa_type - ctx.mask_size = _pair(mask_size) - ctx.save_for_backward(input) - - h_mask, w_mask = ctx.mask_size - batch_size, channels, h_feature, w_feature = input.size() - assert channels == h_mask * w_mask - output = input.new_zeros( - (batch_size, h_feature * w_feature, h_feature, w_feature)) - - ext_module.psamask_forward( - input, - output, - psa_type=psa_type, - num_=batch_size, - h_feature=h_feature, - w_feature=w_feature, - h_mask=h_mask, - w_mask=w_mask, - half_h_mask=(h_mask - 1) // 2, - half_w_mask=(w_mask - 1) // 2) - return output - - @staticmethod - def backward(ctx, grad_output): - input = ctx.saved_tensors[0] - psa_type = ctx.psa_type - h_mask, w_mask = ctx.mask_size - batch_size, channels, h_feature, w_feature = input.size() - grad_input = grad_output.new_zeros( - (batch_size, channels, h_feature, w_feature)) - ext_module.psamask_backward( - grad_output, - grad_input, - psa_type=psa_type, - num_=batch_size, - h_feature=h_feature, - w_feature=w_feature, - h_mask=h_mask, - w_mask=w_mask, - half_h_mask=(h_mask - 1) // 2, - half_w_mask=(w_mask - 1) // 2) - return grad_input, None, None, None - - -psa_mask = 
PSAMaskFunction.apply - - -class PSAMask(nn.Module): - - def __init__(self, psa_type, mask_size=None): - super(PSAMask, self).__init__() - assert psa_type in ['collect', 'distribute'] - if psa_type == 'collect': - psa_type_enum = 0 - else: - psa_type_enum = 1 - self.psa_type_enum = psa_type_enum - self.mask_size = mask_size - self.psa_type = psa_type - - def forward(self, input): - return psa_mask(input, self.psa_type_enum, self.mask_size) - - def __repr__(self): - s = self.__class__.__name__ - s += f'(psa_type={self.psa_type}, ' - s += f'mask_size={self.mask_size})' - return s diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/runner/optimizer/__init__.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/runner/optimizer/__init__.py deleted file mode 100644 index 53c34d0470992cbc374f29681fdd00dc0e57968d..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmcv/runner/optimizer/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .builder import (OPTIMIZER_BUILDERS, OPTIMIZERS, build_optimizer, - build_optimizer_constructor) -from .default_constructor import DefaultOptimizerConstructor - -__all__ = [ - 'OPTIMIZER_BUILDERS', 'OPTIMIZERS', 'DefaultOptimizerConstructor', - 'build_optimizer', 'build_optimizer_constructor' -] diff --git a/spaces/crytion/DeepNude/README.md b/spaces/crytion/DeepNude/README.md deleted file mode 100644 index a616776efe2480784f0ba92acdf5d4d4755f440a..0000000000000000000000000000000000000000 --- a/spaces/crytion/DeepNude/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: DeepNude -emoji: 💩💩💩 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/cvlab/zero123-live/ldm/models/diffusion/ddpm.py b/spaces/cvlab/zero123-live/ldm/models/diffusion/ddpm.py deleted file mode 100644 index a1684aaee57f3a90c3d90b2fbf8e0b58fb21652d..0000000000000000000000000000000000000000 --- a/spaces/cvlab/zero123-live/ldm/models/diffusion/ddpm.py +++ /dev/null @@ -1,1994 +0,0 @@ -""" -wild mixture of -https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py -https://github.com/CompVis/taming-transformers --- merci -""" - -import torch -import torch.nn as nn -import numpy as np -import pytorch_lightning as pl -from torch.optim.lr_scheduler import LambdaLR -from einops import rearrange, repeat -from contextlib import contextmanager, nullcontext -from functools import partial -import itertools -from tqdm import tqdm -from torchvision.utils import make_grid -from pytorch_lightning.utilities.distributed import rank_zero_only -from omegaconf import ListConfig - -from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config -from ldm.modules.ema import LitEma -from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution -from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL -from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like -from 
ldm.models.diffusion.ddim import DDIMSampler -from ldm.modules.attention import CrossAttention - - -__conditioning_keys__ = {'concat': 'c_concat', - 'crossattn': 'c_crossattn', - 'adm': 'y'} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -def uniform_on_device(r1, r2, shape, device): - return (r1 - r2) * torch.rand(*shape, device=device) + r2 - - -class DDPM(pl.LightningModule): - # classic DDPM with Gaussian diffusion, in image space - def __init__(self, - unet_config, - timesteps=1000, - beta_schedule="linear", - loss_type="l2", - ckpt_path=None, - ignore_keys=[], - load_only_unet=False, - monitor="val/loss", - use_ema=True, - first_stage_key="image", - image_size=256, - channels=3, - log_every_t=100, - clip_denoised=True, - linear_start=1e-4, - linear_end=2e-2, - cosine_s=8e-3, - given_betas=None, - original_elbo_weight=0., - v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta - l_simple_weight=1., - conditioning_key=None, - parameterization="eps", # all assuming fixed variance schedules - scheduler_config=None, - use_positional_encodings=False, - learn_logvar=False, - logvar_init=0., - make_it_fit=False, - ucg_training=None, - ): - super().__init__() - assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' - self.parameterization = parameterization - print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") - self.cond_stage_model = None - self.clip_denoised = clip_denoised - self.log_every_t = log_every_t - self.first_stage_key = first_stage_key - self.image_size = image_size # try conv? - self.channels = channels - self.use_positional_encodings = use_positional_encodings - self.model = DiffusionWrapper(unet_config, conditioning_key) - count_params(self.model, verbose=True) - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self.model) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - self.use_scheduler = scheduler_config is not None - if self.use_scheduler: - self.scheduler_config = scheduler_config - - self.v_posterior = v_posterior - self.original_elbo_weight = original_elbo_weight - self.l_simple_weight = l_simple_weight - - if monitor is not None: - self.monitor = monitor - self.make_it_fit = make_it_fit - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) - - self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, - linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) - - self.loss_type = loss_type - - self.learn_logvar = learn_logvar - self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) - if self.learn_logvar: - self.logvar = nn.Parameter(self.logvar, requires_grad=True) - - self.ucg_training = ucg_training or dict() - if self.ucg_training: - self.ucg_prng = np.random.RandomState() - - def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if exists(given_betas): - betas = given_betas - else: - betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, - cosine_s=cosine_s) - alphas = 1. 
- betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.linear_start = linear_start - self.linear_end = linear_end - assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' - - to_torch = partial(torch.tensor, dtype=torch.float32) - - self.register_buffer('betas', to_torch(betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) - - # calculations for posterior q(x_{t-1} | x_t, x_0) - posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( - 1. - alphas_cumprod) + self.v_posterior * betas - # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) - self.register_buffer('posterior_variance', to_torch(posterior_variance)) - # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain - self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) - self.register_buffer('posterior_mean_coef1', to_torch( - betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) - self.register_buffer('posterior_mean_coef2', to_torch( - (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) - - if self.parameterization == "eps": - lvlb_weights = self.betas ** 2 / ( - 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) - elif self.parameterization == "x0": - lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) - else: - raise NotImplementedError("mu not supported") - # TODO how to choose this term - lvlb_weights[0] = lvlb_weights[1] - self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) - assert not torch.isnan(self.lvlb_weights).all() - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.model.parameters()) - self.model_ema.copy_to(self.model) - if context is not None: - print(f"{context}: Switched to EMA weights") - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.model.parameters()) - if context is not None: - print(f"{context}: Restored training weights") - - @torch.no_grad() - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - - if self.make_it_fit: - n_params = len([name for name, _ in - itertools.chain(self.named_parameters(), - self.named_buffers())]) - for name, param in tqdm( - itertools.chain(self.named_parameters(), - self.named_buffers()), - desc="Fitting old weights to new weights", - total=n_params - ): - if not name in sd: - continue - old_shape = sd[name].shape - new_shape = param.shape - assert len(old_shape)==len(new_shape) - if len(new_shape) > 2: - # we only modify first two axes - assert new_shape[2:] == old_shape[2:] - # assumes first axis corresponds to output dim - if not new_shape == old_shape: - new_param = param.clone() - old_param = sd[name] - if len(new_shape) == 1: - for i in range(new_param.shape[0]): - new_param[i] = old_param[i % old_shape[0]] - elif len(new_shape) >= 2: - for i in range(new_param.shape[0]): - for j in range(new_param.shape[1]): - new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] - - n_used_old = torch.ones(old_shape[1]) - for j in range(new_param.shape[1]): - n_used_old[j % old_shape[1]] += 1 - n_used_new = torch.zeros(new_shape[1]) - for j in range(new_param.shape[1]): - n_used_new[j] = n_used_old[j % old_shape[1]] - - n_used_new = n_used_new[None, :] - while len(n_used_new.shape) < len(new_shape): - n_used_new = n_used_new.unsqueeze(-1) - new_param /= n_used_new - - sd[name] = new_param - - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( - sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - if len(unexpected) > 0: - print(f"Unexpected Keys: {unexpected}") - - def q_mean_variance(self, x_start, t): - """ - Get the distribution q(x_t | x_0). - :param x_start: the [N x C x ...] tensor of noiseless inputs. - :param t: the number of diffusion steps (minus 1). Here, 0 means one step. - :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
- """ - mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) - variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) - log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) - return mean, variance, log_variance - - def predict_start_from_noise(self, x_t, t, noise): - return ( - extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise - ) - - def q_posterior(self, x_start, x_t, t): - posterior_mean = ( - extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + - extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t - ) - posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) - posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) - return posterior_mean, posterior_variance, posterior_log_variance_clipped - - def p_mean_variance(self, x, t, clip_denoised: bool): - model_out = self.model(x, t) - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - if clip_denoised: - x_recon.clamp_(-1., 1.) - - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): - b, *_, device = *x.shape, x.device - model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) - noise = noise_like(x.shape, device, repeat_noise) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def p_sample_loop(self, shape, return_intermediates=False): - device = self.betas.device - b = shape[0] - img = torch.randn(shape, device=device) - intermediates = [img] - for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): - img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), - clip_denoised=self.clip_denoised) - if i % self.log_every_t == 0 or i == self.num_timesteps - 1: - intermediates.append(img) - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, batch_size=16, return_intermediates=False): - image_size = self.image_size - channels = self.channels - return self.p_sample_loop((batch_size, channels, image_size, image_size), - return_intermediates=return_intermediates) - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) - - def get_loss(self, pred, target, mean=True): - if self.loss_type == 'l1': - loss = (target - pred).abs() - if mean: - loss = loss.mean() - elif self.loss_type == 'l2': - if mean: - loss = torch.nn.functional.mse_loss(target, pred) - else: - loss = torch.nn.functional.mse_loss(target, pred, reduction='none') - else: - raise NotImplementedError("unknown loss type '{loss_type}'") - - return loss - - def p_losses(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - 
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_out = self.model(x_noisy, t) - - loss_dict = {} - if self.parameterization == "eps": - target = noise - elif self.parameterization == "x0": - target = x_start - else: - raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") - - loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) - - log_prefix = 'train' if self.training else 'val' - - loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) - loss_simple = loss.mean() * self.l_simple_weight - - loss_vlb = (self.lvlb_weights[t] * loss).mean() - loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) - - loss = loss_simple + self.original_elbo_weight * loss_vlb - - loss_dict.update({f'{log_prefix}/loss': loss}) - - return loss, loss_dict - - def forward(self, x, *args, **kwargs): - # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size - # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' - t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - return self.p_losses(x, t, *args, **kwargs) - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') - x = x.to(memory_format=torch.contiguous_format).float() - return x - - def shared_step(self, batch): - x = self.get_input(batch, self.first_stage_key) - loss, loss_dict = self(x) - return loss, loss_dict - - def training_step(self, batch, batch_idx): - for k in self.ucg_training: - p = self.ucg_training[k]["p"] - val = self.ucg_training[k]["val"] - if val is None: - val = "" - for i in range(len(batch[k])): - if self.ucg_prng.choice(2, p=[1-p, p]): - batch[k][i] = val - - loss, loss_dict = self.shared_step(batch) - - self.log_dict(loss_dict, prog_bar=True, - logger=True, on_step=True, on_epoch=True) - - self.log("global_step", self.global_step, - prog_bar=True, logger=True, on_step=True, on_epoch=False) - - if self.use_scheduler: - lr = self.optimizers().param_groups[0]['lr'] - self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) - - return loss - - @torch.no_grad() - def validation_step(self, batch, batch_idx): - _, loss_dict_no_ema = self.shared_step(batch) - with self.ema_scope(): - _, loss_dict_ema = self.shared_step(batch) - loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} - self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) - self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self.model) - - def _get_rows_from_list(self, samples): - n_imgs_per_row = len(samples) - denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): - log = dict() - x = self.get_input(batch, self.first_stage_key) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - x = x.to(self.device)[:N] - log["inputs"] = x - - # get diffusion row - diffusion_row = list() - x_start = x[:n_row] - - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - 
noise = torch.randn_like(x_start) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - diffusion_row.append(x_noisy) - - log["diffusion_row"] = self._get_rows_from_list(diffusion_row) - - if sample: - # get denoise row - with self.ema_scope("Plotting"): - samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) - - log["samples"] = samples - log["denoise_row"] = self._get_rows_from_list(denoise_row) - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.learn_logvar: - params = params + [self.logvar] - opt = torch.optim.AdamW(params, lr=lr) - return opt - - -class LatentDiffusion(DDPM): - """main class""" - def __init__(self, - first_stage_config, - cond_stage_config, - num_timesteps_cond=None, - cond_stage_key="image", - cond_stage_trainable=False, - concat_mode=True, - cond_stage_forward=None, - conditioning_key=None, - scale_factor=1.0, - scale_by_std=False, - unet_trainable=True, - *args, **kwargs): - self.num_timesteps_cond = default(num_timesteps_cond, 1) - self.scale_by_std = scale_by_std - assert self.num_timesteps_cond <= kwargs['timesteps'] - # for backwards compatibility after implementation of DiffusionWrapper - if conditioning_key is None: - conditioning_key = 'concat' if concat_mode else 'crossattn' - if cond_stage_config == '__is_unconditional__': - conditioning_key = None - ckpt_path = kwargs.pop("ckpt_path", None) - ignore_keys = kwargs.pop("ignore_keys", []) - super().__init__(conditioning_key=conditioning_key, *args, **kwargs) - self.concat_mode = concat_mode - self.cond_stage_trainable = cond_stage_trainable - self.unet_trainable = unet_trainable - self.cond_stage_key = cond_stage_key - try: - self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: - self.num_downs = 0 - if not scale_by_std: - self.scale_factor = scale_factor - else: - self.register_buffer('scale_factor', torch.tensor(scale_factor)) - self.instantiate_first_stage(first_stage_config) - self.instantiate_cond_stage(cond_stage_config) - self.cond_stage_forward = cond_stage_forward - - # construct linear projection layer for concatenating image CLIP embedding and RT - self.cc_projection = nn.Linear(772, 768) - nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) - nn.init.zeros_(list(self.cc_projection.parameters())[1]) - self.cc_projection.requires_grad_(True) - - self.clip_denoised = False - self.bbox_tokenizer = None - - self.restarted_from_ckpt = False - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys) - self.restarted_from_ckpt = True - - def make_cond_schedule(self, ): - self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) - ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() - self.cond_ids[:self.num_timesteps_cond] = ids - - @rank_zero_only - @torch.no_grad() - def on_train_batch_start(self, batch, batch_idx, dataloader_idx): - # only for very first batch - if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: - assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' - # set rescale weight to 1./std of encodings - print("### USING STD-RESCALING ###") - x = super().get_input(batch, 
self.first_stage_key) - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - del self.scale_factor - self.register_buffer('scale_factor', 1. / z.flatten().std()) - print(f"setting self.scale_factor to {self.scale_factor}") - print("### USING STD-RESCALING ###") - - def register_schedule(self, - given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) - - self.shorten_cond_schedule = self.num_timesteps_cond > 1 - if self.shorten_cond_schedule: - self.make_cond_schedule() - - def instantiate_first_stage(self, config): - model = instantiate_from_config(config) - self.first_stage_model = model.eval() - self.first_stage_model.train = disabled_train - for param in self.first_stage_model.parameters(): - param.requires_grad = False - - def instantiate_cond_stage(self, config): - if not self.cond_stage_trainable: - if config == "__is_first_stage__": - print("Using first stage also as cond stage.") - self.cond_stage_model = self.first_stage_model - elif config == "__is_unconditional__": - print(f"Training {self.__class__.__name__} as an unconditional model.") - self.cond_stage_model = None - # self.be_unconditional = True - else: - model = instantiate_from_config(config) - self.cond_stage_model = model.eval() - self.cond_stage_model.train = disabled_train - for param in self.cond_stage_model.parameters(): - param.requires_grad = False - else: - assert config != '__is_first_stage__' - assert config != '__is_unconditional__' - model = instantiate_from_config(config) - self.cond_stage_model = model - - def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): - denoise_row = [] - for zd in tqdm(samples, desc=desc): - denoise_row.append(self.decode_first_stage(zd.to(self.device), - force_not_quantize=force_no_decoder_quantization)) - n_imgs_per_row = len(denoise_row) - denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W - denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - def get_first_stage_encoding(self, encoder_posterior): - if isinstance(encoder_posterior, DiagonalGaussianDistribution): - z = encoder_posterior.sample() - elif isinstance(encoder_posterior, torch.Tensor): - z = encoder_posterior - else: - raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") - return self.scale_factor * z - - def get_learned_conditioning(self, c): - if self.cond_stage_forward is None: - if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): - c = self.cond_stage_model.encode(c) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - else: - c = self.cond_stage_model(c) - else: - assert hasattr(self.cond_stage_model, self.cond_stage_forward) - c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) - return c - - def meshgrid(self, h, w): - y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) - x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) - - arr = torch.cat([y, x], dim=-1) - return arr - - def delta_border(self, h, w): - """ - :param h: height - :param w: width - :return: normalized distance to image border, - wtith min distance = 0 at border and max dist = 
0.5 at image center - """ - lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) - arr = self.meshgrid(h, w) / lower_right_corner - dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] - dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] - edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] - return edge_dist - - def get_weighting(self, h, w, Ly, Lx, device): - weighting = self.delta_border(h, w) - weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], - self.split_input_params["clip_max_weight"], ) - weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) - - if self.split_input_params["tie_braker"]: - L_weighting = self.delta_border(Ly, Lx) - L_weighting = torch.clip(L_weighting, - self.split_input_params["clip_min_tie_weight"], - self.split_input_params["clip_max_tie_weight"]) - - L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) - weighting = weighting * L_weighting - return weighting - - def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code - """ - :param x: img of size (bs, c, h, w) - :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) - """ - bs, nc, h, w = x.shape - - # number of crops in image - Ly = (h - kernel_size[0]) // stride[0] + 1 - Lx = (w - kernel_size[1]) // stride[1] + 1 - - if uf == 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) - - weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) - - elif uf > 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), - dilation=1, padding=0, - stride=(stride[0] * uf, stride[1] * uf)) - fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) - - elif df > 1 and uf == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), - dilation=1, padding=0, - stride=(stride[0] // df, stride[1] // df)) - fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) - - else: - raise NotImplementedError - - return fold, unfold, normalization, weighting - - - @torch.no_grad() - def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, - cond_key=None, return_original_cond=False, bs=None, uncond=0.05): - x = super().get_input(batch, k) - T = 
batch['T'].to(memory_format=torch.contiguous_format).float() - - if bs is not None: - x = x[:bs] - T = T[:bs].to(self.device) - - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - cond_key = cond_key or self.cond_stage_key - xc = super().get_input(batch, cond_key).to(self.device) - if bs is not None: - xc = xc[:bs] - cond = {} - - # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. - random = torch.rand(x.size(0), device=x.device) - prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") - input_mask = 1 - rearrange((random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1") - null_prompt = self.get_learned_conditioning([""]) - - # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] - # print('=========== xc shape ===========', xc.shape) - with torch.enable_grad(): - clip_emb = self.get_learned_conditioning(xc).detach() - null_prompt = self.get_learned_conditioning([""]).detach() - cond["c_crossattn"] = [self.cc_projection(torch.cat([torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :]], dim=-1))] - cond["c_concat"] = [input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach()] - out = [z, cond] - if return_first_stage_outputs: - xrec = self.decode_first_stage(z) - out.extend([x, xrec]) - if return_original_cond: - out.append(xc) - return out - - # @torch.no_grad() - def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1. / self.scale_factor * z - - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - uf = self.split_input_params["vqf"] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [self.first_stage_model.decode(z[:, :, :, :, i], - force_not_quantize=predict_cids or force_not_quantize) - for i in range(z.shape[-1])] - else: - - output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. 
reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - @torch.no_grad() - def encode_first_stage(self, x): - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - df = self.split_input_params["vqf"] - self.split_input_params['original_image_size'] = x.shape[-2:] - bs, nc, h, w = x.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) - z = unfold(x) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) - o = o * weighting - - # Reverse reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization - return decoded - - else: - return self.first_stage_model.encode(x) - else: - return self.first_stage_model.encode(x) - - def shared_step(self, batch, **kwargs): - x, c = self.get_input(batch, self.first_stage_key) - loss = self(x, c) - return loss - - def forward(self, x, c, *args, **kwargs): - t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - if self.model.conditioning_key is not None: - assert c is not None - # if self.cond_stage_trainable: - # c = self.get_learned_conditioning(c) - if self.shorten_cond_schedule: # TODO: drop this option - tc = self.cond_ids[t].to(self.device) - c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) - return self.p_losses(x, c, t, *args, **kwargs) - - def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset - def rescale_bbox(bbox): - x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) - y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) - w = min(bbox[2] / crop_coordinates[2], 1 - x0) - h = min(bbox[3] / crop_coordinates[3], 1 - y0) - return x0, y0, w, h - - return [rescale_bbox(b) for b in bboxes] - - def apply_model(self, x_noisy, t, cond, return_ids=False): - - if isinstance(cond, dict): - # hybrid case, cond is exptected to be a dict - pass - else: - if not isinstance(cond, list): - cond = [cond] - key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' - cond = {key: cond} - - if hasattr(self, "split_input_params"): - assert len(cond) == 1 # todo can only deal with one conditioning atm - assert not return_ids - ks = self.split_input_params["ks"] # eg. 
(128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - - h, w = x_noisy.shape[-2:] - - fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) - - z = unfold(x_noisy) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] - - if self.cond_stage_key in ["image", "LR_image", "segmentation", - 'bbox_img'] and self.model.conditioning_key: # todo check for completeness - c_key = next(iter(cond.keys())) # get key - c = next(iter(cond.values())) # get value - assert (len(c) == 1) # todo extend to list with more than one elem - c = c[0] # get element - - c = unfold(c) - c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] - - elif self.cond_stage_key == 'coordinates_bbox': - assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' - - # assuming padding of unfold is always 0 and its dilation is always 1 - n_patches_per_row = int((w - ks[0]) / stride[0] + 1) - full_img_h, full_img_w = self.split_input_params['original_image_size'] - # as we are operating on latents, we need the factor from the original image size to the - # spatial latent size to properly rescale the crops for regenerating the bbox annotations - num_downs = self.first_stage_model.encoder.num_resolutions - 1 - rescale_latent = 2 ** (num_downs) - - # get top left postions of patches as conforming for the bbbox tokenizer, therefore we - # need to rescale the tl patch coordinates to be in between (0,1) - tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, - rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) - for patch_nr in range(z.shape[-1])] - - # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) - patch_limits = [(x_tl, y_tl, - rescale_latent * ks[0] / full_img_w, - rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] - # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] - - # tokenize crop coordinates for the bounding boxes of the respective patches - patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) - for bbox in patch_limits] # list of length l with tensors of shape (1, 2) - # cut tknzd crop position from conditioning - assert isinstance(cond, dict), 'cond must be dict to be fed into model' - cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) - - adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) - adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') - adapted_cond = self.get_learned_conditioning(adapted_cond) - adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) - - cond_list = [{'c_crossattn': [e]} for e in adapted_cond] - - else: - cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient - - # apply model by loop over crops - output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] - assert not isinstance(output_list[0], - tuple) # todo cant deal with multiple model outputs check this never happens - - o = torch.stack(output_list, axis=-1) - o = o * weighting - # Reverse reshape to img shape - o = 
o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - x_recon = fold(o) / normalization - - else: - x_recon = self.model(x_noisy, t, **cond) - - if isinstance(x_recon, tuple) and not return_ids: - return x_recon[0] - else: - return x_recon - - def _predict_eps_from_xstart(self, x_t, t, pred_xstart): - return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) - - def _prior_bpd(self, x_start): - """ - Get the prior KL term for the variational lower-bound, measured in - bits-per-dim. - This term can't be optimized, as it only depends on the encoder. - :param x_start: the [N x C x ...] tensor of inputs. - :return: a batch of [N] KL values (in bits), one per batch element. - """ - batch_size = x_start.shape[0] - t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) - qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) - kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) - return mean_flat(kl_prior) / np.log(2.0) - - def p_losses(self, x_start, cond, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_output = self.apply_model(x_noisy, t, cond) - - loss_dict = {} - prefix = 'train' if self.training else 'val' - - if self.parameterization == "x0": - target = x_start - elif self.parameterization == "eps": - target = noise - else: - raise NotImplementedError() - - loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) - loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) - - logvar_t = self.logvar[t].to(self.device) - loss = loss_simple / torch.exp(logvar_t) + logvar_t - # loss = loss_simple / torch.exp(self.logvar) + self.logvar - if self.learn_logvar: - loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) - loss_dict.update({'logvar': self.logvar.data.mean()}) - - loss = self.l_simple_weight * loss.mean() - - loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) - loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() - loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) - loss += (self.original_elbo_weight * loss_vlb) - loss_dict.update({f'{prefix}/loss': loss}) - - return loss, loss_dict - - def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, - return_x0=False, score_corrector=None, corrector_kwargs=None): - t_in = t - model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) - - if score_corrector is not None: - assert self.parameterization == "eps" - model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) - - if return_codebook_ids: - model_out, logits = model_out - - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - else: - raise NotImplementedError() - - if clip_denoised: - x_recon.clamp_(-1., 1.) 
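Note on the "eps" branch above: predict_start_from_noise inverts the DDPM forward process x_t = sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * eps, giving x0 = (x_t - sqrt(1 - alpha_bar_t) * eps) / sqrt(alpha_bar_t); the estimate is then optionally clamped before q_posterior is applied. A minimal standalone sketch of that conversion follows; the helper and variable names are illustrative only and are not taken from this file.

import torch

def eps_to_x0(x_t, eps, alphas_cumprod, t):
    # Illustrative inversion of the forward process, assuming alphas_cumprod is a
    # 1-D tensor of cumulative products indexed by the integer timestep t.
    a_bar = alphas_cumprod[t].view(-1, *([1] * (x_t.dim() - 1)))
    return (x_t - (1.0 - a_bar).sqrt() * eps) / a_bar.sqrt()

# Self-check with a toy linear beta schedule (loose tolerance for float32):
betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
x0 = torch.randn(2, 4, 8, 8)
t = torch.randint(0, 1000, (2,))
eps = torch.randn_like(x0)
a_bar = alphas_cumprod[t].view(-1, 1, 1, 1)
x_t = a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * eps
assert torch.allclose(eps_to_x0(x_t, eps, alphas_cumprod, t), x0, atol=1e-3)

This is algebraically the same computation the class performs with its precomputed sqrt_recip_alphas_cumprod and sqrt_recipm1_alphas_cumprod buffers and extract_into_tensor, just written without those buffers.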
- if quantize_denoised: - x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - if return_codebook_ids: - return model_mean, posterior_variance, posterior_log_variance, logits - elif return_x0: - return model_mean, posterior_variance, posterior_log_variance, x_recon - else: - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, - return_codebook_ids=False, quantize_denoised=False, return_x0=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): - b, *_, device = *x.shape, x.device - outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, - return_codebook_ids=return_codebook_ids, - quantize_denoised=quantize_denoised, - return_x0=return_x0, - score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if return_codebook_ids: - raise DeprecationWarning("Support dropped.") - model_mean, _, model_log_variance, logits = outputs - elif return_x0: - model_mean, _, model_log_variance, x0 = outputs - else: - model_mean, _, model_log_variance = outputs - - noise = noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - - if return_codebook_ids: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) - if return_x0: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 - else: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, - img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., - score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, - log_every_t=None): - if not log_every_t: - log_every_t = self.log_every_t - timesteps = self.num_timesteps - if batch_size is not None: - b = batch_size if batch_size is not None else shape[0] - shape = [batch_size] + list(shape) - else: - b = batch_size = shape[0] - if x_T is None: - img = torch.randn(shape, device=self.device) - else: - img = x_T - intermediates = [] - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', - total=timesteps) if verbose else reversed( - range(0, timesteps)) - if type(temperature) == float: - temperature = [temperature] * timesteps - - for i in iterator: - ts = torch.full((b,), i, device=self.device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img, x0_partial = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised, return_x0=True, - temperature=temperature[i], noise_dropout=noise_dropout, - 
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if mask is not None: - assert x0 is not None - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(x0_partial) - if callback: callback(i) - if img_callback: img_callback(img, i) - return img, intermediates - - @torch.no_grad() - def p_sample_loop(self, cond, shape, return_intermediates=False, - x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, start_T=None, - log_every_t=None): - - if not log_every_t: - log_every_t = self.log_every_t - device = self.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - intermediates = [img] - if timesteps is None: - timesteps = self.num_timesteps - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( - range(0, timesteps)) - - if mask is not None: - assert x0 is not None - assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match - - for i in iterator: - ts = torch.full((b,), i, device=device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised) - if mask is not None: - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) - - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, - verbose=True, timesteps=None, quantize_denoised=False, - mask=None, x0=None, shape=None,**kwargs): - if shape is None: - shape = (batch_size, self.channels, self.image_size, self.image_size) - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - return self.p_sample_loop(cond, - shape, - return_intermediates=return_intermediates, x_T=x_T, - verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, - mask=mask, x0=x0) - - @torch.no_grad() - def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): - if ddim: - ddim_sampler = DDIMSampler(self) - shape = (self.channels, self.image_size, self.image_size) - samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, - shape, cond, verbose=False, **kwargs) - - else: - samples, intermediates = self.sample(cond=cond, batch_size=batch_size, - return_intermediates=True, **kwargs) - - return samples, intermediates - - @torch.no_grad() - def get_unconditional_conditioning(self, batch_size, null_label=None, image_size=512): - if null_label is not None: - xc = null_label - if isinstance(xc, ListConfig): - xc = list(xc) - if isinstance(xc, dict) or isinstance(xc, list): - c = self.get_learned_conditioning(xc) - else: - if hasattr(xc, "to"): - xc = xc.to(self.device) - c = self.get_learned_conditioning(xc) - 
else: - # todo: get null label from cond_stage_model - raise NotImplementedError() - c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) - cond = {} - cond["c_crossattn"] = [c] - cond["c_concat"] = [torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to(self.device)] - return cond - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, - quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, - plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, - use_ema_scope=True, - **kwargs): - ema_scope = self.ema_scope if use_ema_scope else nullcontext - use_ddim = ddim_steps is not None - - log = dict() - z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, - return_first_stage_outputs=True, - force_c_encode=True, - return_original_cond=True, - bs=N) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log["inputs"] = x - log["reconstruction"] = xrec - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, "decode"): - xc = self.cond_stage_model.decode(c) - log["conditioning"] = xc - elif self.cond_stage_key in ["caption", "txt"]: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2]//25) - log["conditioning"] = xc - elif self.cond_stage_key == 'class_label': - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2]//25) - log['conditioning'] = xc - elif isimage(xc): - log["conditioning"] = xc - if ismap(xc): - log["original_conditioning"] = self.to_rgb(xc) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if sample: - # get denoise row - with ema_scope("Sampling"): - samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, - ddim_steps=ddim_steps,eta=ddim_eta) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( - self.first_stage_model, IdentityFirstStage): - # also display when quantizing x0 while sampling - with ema_scope("Plotting Quantized Denoised"): - samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, - ddim_steps=ddim_steps,eta=ddim_eta, - quantize_denoised=True) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, - # quantize_denoised=True) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_x0_quantized"] = x_samples - - if unconditional_guidance_scale > 1.0: - uc = 
self.get_unconditional_conditioning(N, unconditional_guidance_label, image_size=x.shape[-1]) - # uc = torch.zeros_like(c) - with ema_scope("Sampling with classifier-free guidance"): - samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=uc, - ) - x_samples_cfg = self.decode_first_stage(samples_cfg) - log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg - - if inpaint: - # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] - mask = torch.ones(N, h, w).to(self.device) - # zeros will be filled in - mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. - mask = mask[:, None, ...] - with ema_scope("Plotting Inpaint"): - - samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_inpainting"] = x_samples - log["mask"] = mask - - # outpaint - mask = 1. - mask - with ema_scope("Plotting Outpaint"): - samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_outpainting"] = x_samples - - if plot_progressive_rows: - with ema_scope("Plotting Progressives"): - img, progressives = self.progressive_denoising(c, - shape=(self.channels, self.image_size, self.image_size), - batch_size=N) - prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") - log["progressive_row"] = prog_row - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = [] - if self.unet_trainable == "attn": - print("Training only unet attention layers") - for n, m in self.model.named_modules(): - if isinstance(m, CrossAttention) and n.endswith('attn2'): - params.extend(m.parameters()) - if self.unet_trainable == "conv_in": - print("Training only unet input conv layers") - params = list(self.model.diffusion_model.input_blocks[0][0].parameters()) - elif self.unet_trainable is True or self.unet_trainable == "all": - print("Training the full unet") - params = list(self.model.parameters()) - else: - raise ValueError(f"Unrecognised setting for unet_trainable: {self.unet_trainable}") - - if self.cond_stage_trainable: - print(f"{self.__class__.__name__}: Also optimizing conditioner params!") - params = params + list(self.cond_stage_model.parameters()) - if self.learn_logvar: - print('Diffusion model optimizing logvar') - params.append(self.logvar) - - if self.cc_projection is not None: - params = params + list(self.cc_projection.parameters()) - print('========== optimizing for cc projection weight ==========') - - opt = torch.optim.AdamW([{"params": self.model.parameters(), "lr": lr}, - {"params": self.cc_projection.parameters(), "lr": 10. 
* lr}], lr=lr) - if self.use_scheduler: - assert 'target' in self.scheduler_config - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }] - return [opt], scheduler - return opt - - @torch.no_grad() - def to_rgb(self, x): - x = x.float() - if not hasattr(self, "colorize"): - self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) - x = nn.functional.conv2d(x, weight=self.colorize) - x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. - return x - - -class DiffusionWrapper(pl.LightningModule): - def __init__(self, diff_model_config, conditioning_key): - super().__init__() - self.diffusion_model = instantiate_from_config(diff_model_config) - self.conditioning_key = conditioning_key - assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm', 'hybrid-adm'] - - def forward(self, x, t, c_concat: list = None, c_crossattn: list = None, c_adm=None): - if self.conditioning_key is None: - out = self.diffusion_model(x, t) - elif self.conditioning_key == 'concat': - xc = torch.cat([x] + c_concat, dim=1) - out = self.diffusion_model(xc, t) - elif self.conditioning_key == 'crossattn': - # c_crossattn dimension: torch.Size([8, 1, 768]) 1 - # cc dimension: torch.Size([8, 1, 768] - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(x, t, context=cc) - elif self.conditioning_key == 'hybrid': - xc = torch.cat([x] + c_concat, dim=1) - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(xc, t, context=cc) - elif self.conditioning_key == 'hybrid-adm': - assert c_adm is not None - xc = torch.cat([x] + c_concat, dim=1) - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(xc, t, context=cc, y=c_adm) - elif self.conditioning_key == 'adm': - cc = c_crossattn[0] - out = self.diffusion_model(x, t, y=cc) - else: - raise NotImplementedError() - - return out - - -class LatentUpscaleDiffusion(LatentDiffusion): - def __init__(self, *args, low_scale_config, low_scale_key="LR", **kwargs): - super().__init__(*args, **kwargs) - # assumes that neither the cond_stage nor the low_scale_model contain trainable params - assert not self.cond_stage_trainable - self.instantiate_low_stage(low_scale_config) - self.low_scale_key = low_scale_key - - def instantiate_low_stage(self, config): - model = instantiate_from_config(config) - self.low_scale_model = model.eval() - self.low_scale_model.train = disabled_train - for param in self.low_scale_model.parameters(): - param.requires_grad = False - - @torch.no_grad() - def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False): - if not log_mode: - z, c = super().get_input(batch, k, force_c_encode=True, bs=bs) - else: - z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, - force_c_encode=True, return_original_cond=True, bs=bs) - x_low = batch[self.low_scale_key][:bs] - x_low = rearrange(x_low, 'b h w c -> b c h w') - x_low = x_low.to(memory_format=torch.contiguous_format).float() - zx, noise_level = self.low_scale_model(x_low) - all_conds = {"c_concat": [zx], "c_crossattn": [c], "c_adm": noise_level} - #import pudb; pu.db - if log_mode: - # TODO: maybe disable if too expensive - interpretability = False - if interpretability: - zx = zx[:, :, ::2, ::2] - x_low_rec = self.low_scale_model.decode(zx) - return z, all_conds, x, xrec, xc, x_low, x_low_rec, noise_level - return z, all_conds - - @torch.no_grad() - def 
log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, - plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, - unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, - **kwargs): - ema_scope = self.ema_scope if use_ema_scope else nullcontext - use_ddim = ddim_steps is not None - - log = dict() - z, c, x, xrec, xc, x_low, x_low_rec, noise_level = self.get_input(batch, self.first_stage_key, bs=N, - log_mode=True) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log["inputs"] = x - log["reconstruction"] = xrec - log["x_lr"] = x_low - log[f"x_lr_rec_@noise_levels{'-'.join(map(lambda x: str(x), list(noise_level.cpu().numpy())))}"] = x_low_rec - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, "decode"): - xc = self.cond_stage_model.decode(c) - log["conditioning"] = xc - elif self.cond_stage_key in ["caption", "txt"]: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2]//25) - log["conditioning"] = xc - elif self.cond_stage_key == 'class_label': - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2]//25) - log['conditioning'] = xc - elif isimage(xc): - log["conditioning"] = xc - if ismap(xc): - log["original_conditioning"] = self.to_rgb(xc) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if sample: - # get denoise row - with ema_scope("Sampling"): - samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if unconditional_guidance_scale > 1.0: - uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label) - # TODO explore better "unconditional" choices for the other keys - # maybe guide away from empty text label and highest noise level and maximally degraded zx? - uc = dict() - for k in c: - if k == "c_crossattn": - assert isinstance(c[k], list) and len(c[k]) == 1 - uc[k] = [uc_tmp] - elif k == "c_adm": # todo: only run with text-based guidance? 
- assert isinstance(c[k], torch.Tensor) - uc[k] = torch.ones_like(c[k]) * self.low_scale_model.max_noise_level - elif isinstance(c[k], list): - uc[k] = [c[k][i] for i in range(len(c[k]))] - else: - uc[k] = c[k] - - with ema_scope("Sampling with classifier-free guidance"): - samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=uc, - ) - x_samples_cfg = self.decode_first_stage(samples_cfg) - log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg - - if plot_progressive_rows: - with ema_scope("Plotting Progressives"): - img, progressives = self.progressive_denoising(c, - shape=(self.channels, self.image_size, self.image_size), - batch_size=N) - prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") - log["progressive_row"] = prog_row - - return log - - -class LatentInpaintDiffusion(LatentDiffusion): - """ - can either run as pure inpainting model (only concat mode) or with mixed conditionings, - e.g. mask as concat and text via cross-attn. - To disable finetuning mode, set finetune_keys to None - """ - def __init__(self, - finetune_keys=("model.diffusion_model.input_blocks.0.0.weight", - "model_ema.diffusion_modelinput_blocks00weight" - ), - concat_keys=("mask", "masked_image"), - masked_image_key="masked_image", - keep_finetune_dims=4, # if model was trained without concat mode before and we would like to keep these channels - c_concat_log_start=None, # to log reconstruction of c_concat codes - c_concat_log_end=None, - *args, **kwargs - ): - ckpt_path = kwargs.pop("ckpt_path", None) - ignore_keys = kwargs.pop("ignore_keys", list()) - super().__init__(*args, **kwargs) - self.masked_image_key = masked_image_key - assert self.masked_image_key in concat_keys - self.finetune_keys = finetune_keys - self.concat_keys = concat_keys - self.keep_dims = keep_finetune_dims - self.c_concat_log_start = c_concat_log_start - self.c_concat_log_end = c_concat_log_end - if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint' - if exists(ckpt_path): - self.init_from_ckpt(ckpt_path, ignore_keys) - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - - # make it explicit, finetune by including extra input channels - if exists(self.finetune_keys) and k in self.finetune_keys: - new_entry = None - for name, param in self.named_parameters(): - if name in self.finetune_keys: - print(f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only") - new_entry = torch.zeros_like(param) # zero init - assert exists(new_entry), 'did not find matching parameter to modify' - new_entry[:, :self.keep_dims, ...] 
= sd[k] - sd[k] = new_entry - - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - if len(unexpected) > 0: - print(f"Unexpected Keys: {unexpected}") - - @torch.no_grad() - def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False): - # note: restricted to non-trainable encoders currently - assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting' - z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, - force_c_encode=True, return_original_cond=True, bs=bs) - - assert exists(self.concat_keys) - c_cat = list() - for ck in self.concat_keys: - cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float() - if bs is not None: - cc = cc[:bs] - cc = cc.to(self.device) - bchw = z.shape - if ck != self.masked_image_key: - cc = torch.nn.functional.interpolate(cc, size=bchw[-2:]) - else: - cc = self.get_first_stage_encoding(self.encode_first_stage(cc)) - c_cat.append(cc) - c_cat = torch.cat(c_cat, dim=1) - all_conds = {"c_concat": [c_cat], "c_crossattn": [c]} - if return_first_stage_outputs: - return z, all_conds, x, xrec, xc - return z, all_conds - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, - quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, - plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, - use_ema_scope=True, - **kwargs): - ema_scope = self.ema_scope if use_ema_scope else nullcontext - use_ddim = ddim_steps is not None - - log = dict() - z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True) - c_cat, c = c["c_concat"][0], c["c_crossattn"][0] - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log["inputs"] = x - log["reconstruction"] = xrec - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, "decode"): - xc = self.cond_stage_model.decode(c) - log["conditioning"] = xc - elif self.cond_stage_key in ["caption", "txt"]: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) - log["conditioning"] = xc - elif self.cond_stage_key == 'class_label': - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) - log['conditioning'] = xc - elif isimage(xc): - log["conditioning"] = xc - if ismap(xc): - log["original_conditioning"] = self.to_rgb(xc) - - if not (self.c_concat_log_start is None and self.c_concat_log_end is None): - log["c_concat_decoded"] = self.decode_first_stage(c_cat[:,self.c_concat_log_start:self.c_concat_log_end]) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - 
diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if sample: - # get denoise row - with ema_scope("Sampling"): - samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]}, - batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if unconditional_guidance_scale > 1.0: - uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label) - uc_cat = c_cat - uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]} - with ema_scope("Sampling with classifier-free guidance"): - samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]}, - batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=uc_full, - ) - x_samples_cfg = self.decode_first_stage(samples_cfg) - log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg - - log["masked_image"] = rearrange(batch["masked_image"], - 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float() - return log - - -class Layout2ImgDiffusion(LatentDiffusion): - # TODO: move all layout-specific hacks to this class - def __init__(self, cond_stage_key, *args, **kwargs): - assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' - super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) - - def log_images(self, batch, N=8, *args, **kwargs): - logs = super().log_images(batch=batch, N=N, *args, **kwargs) - - key = 'train' if self.training else 'validation' - dset = self.trainer.datamodule.datasets[key] - mapper = dset.conditional_builders[self.cond_stage_key] - - bbox_imgs = [] - map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno)) - for tknzd_bbox in batch[self.cond_stage_key][:N]: - bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256)) - bbox_imgs.append(bboximg) - - cond_img = torch.stack(bbox_imgs, dim=0) - logs['bbox_image'] = cond_img - return logs - - -class SimpleUpscaleDiffusion(LatentDiffusion): - def __init__(self, *args, low_scale_key="LR", **kwargs): - super().__init__(*args, **kwargs) - # assumes that neither the cond_stage nor the low_scale_model contain trainable params - assert not self.cond_stage_trainable - self.low_scale_key = low_scale_key - - @torch.no_grad() - def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False): - if not log_mode: - z, c = super().get_input(batch, k, force_c_encode=True, bs=bs) - else: - z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, - force_c_encode=True, return_original_cond=True, bs=bs) - x_low = batch[self.low_scale_key][:bs] - x_low = rearrange(x_low, 'b h w c -> b c h w') - x_low = x_low.to(memory_format=torch.contiguous_format).float() - - encoder_posterior = self.encode_first_stage(x_low) - zx = self.get_first_stage_encoding(encoder_posterior).detach() - all_conds = {"c_concat": [zx], "c_crossattn": [c]} - - if log_mode: - # TODO: maybe disable if too expensive - interpretability = False - if interpretability: - zx = 
zx[:, :, ::2, ::2] - return z, all_conds, x, xrec, xc, x_low - return z, all_conds - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, - plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, - unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, - **kwargs): - ema_scope = self.ema_scope if use_ema_scope else nullcontext - use_ddim = ddim_steps is not None - - log = dict() - z, c, x, xrec, xc, x_low = self.get_input(batch, self.first_stage_key, bs=N, log_mode=True) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log["inputs"] = x - log["reconstruction"] = xrec - log["x_lr"] = x_low - - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, "decode"): - xc = self.cond_stage_model.decode(c) - log["conditioning"] = xc - elif self.cond_stage_key in ["caption", "txt"]: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2]//25) - log["conditioning"] = xc - elif self.cond_stage_key == 'class_label': - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2]//25) - log['conditioning'] = xc - elif isimage(xc): - log["conditioning"] = xc - if ismap(xc): - log["original_conditioning"] = self.to_rgb(xc) - - if sample: - # get denoise row - with ema_scope("Sampling"): - samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - - if unconditional_guidance_scale > 1.0: - uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label) - uc = dict() - for k in c: - if k == "c_crossattn": - assert isinstance(c[k], list) and len(c[k]) == 1 - uc[k] = [uc_tmp] - elif isinstance(c[k], list): - uc[k] = [c[k][i] for i in range(len(c[k]))] - else: - uc[k] = c[k] - - with ema_scope("Sampling with classifier-free guidance"): - samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=uc, - ) - x_samples_cfg = self.decode_first_stage(samples_cfg) - log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg - return log - -class MultiCatFrameDiffusion(LatentDiffusion): - def __init__(self, *args, low_scale_key="LR", **kwargs): - super().__init__(*args, **kwargs) - # assumes that neither the cond_stage nor the low_scale_model contain trainable params - assert not self.cond_stage_trainable - self.low_scale_key = low_scale_key - - @torch.no_grad() - def get_input(self, batch, k, cond_key=None, bs=None, log_mode=False): - n = 2 - if not log_mode: - z, c = super().get_input(batch, k, force_c_encode=True, bs=bs) - else: - z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True, - force_c_encode=True, return_original_cond=True, bs=bs) - cat_conds = batch[self.low_scale_key][:bs] - cats = [] - for i in range(n): - x_low = cat_conds[:,:,:,3*i:3*(i+1)] - x_low = rearrange(x_low, 'b h w c -> b c h w') - x_low = x_low.to(memory_format=torch.contiguous_format).float() - encoder_posterior = self.encode_first_stage(x_low) - zx = self.get_first_stage_encoding(encoder_posterior).detach() - cats.append(zx) - - all_conds = {"c_concat": [torch.cat(cats, dim=1)], 
"c_crossattn": [c]} - - if log_mode: - # TODO: maybe disable if too expensive - interpretability = False - if interpretability: - zx = zx[:, :, ::2, ::2] - return z, all_conds, x, xrec, xc, x_low - return z, all_conds - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, - plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, - unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, - **kwargs): - ema_scope = self.ema_scope if use_ema_scope else nullcontext - use_ddim = ddim_steps is not None - - log = dict() - z, c, x, xrec, xc, x_low = self.get_input(batch, self.first_stage_key, bs=N, log_mode=True) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log["inputs"] = x - log["reconstruction"] = xrec - log["x_lr"] = x_low - - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, "decode"): - xc = self.cond_stage_model.decode(c) - log["conditioning"] = xc - elif self.cond_stage_key in ["caption", "txt"]: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2]//25) - log["conditioning"] = xc - elif self.cond_stage_key == 'class_label': - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2]//25) - log['conditioning'] = xc - elif isimage(xc): - log["conditioning"] = xc - if ismap(xc): - log["original_conditioning"] = self.to_rgb(xc) - - if sample: - # get denoise row - with ema_scope("Sampling"): - samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - - if unconditional_guidance_scale > 1.0: - uc_tmp = self.get_unconditional_conditioning(N, unconditional_guidance_label) - uc = dict() - for k in c: - if k == "c_crossattn": - assert isinstance(c[k], list) and len(c[k]) == 1 - uc[k] = [uc_tmp] - elif isinstance(c[k], list): - uc[k] = [c[k][i] for i in range(len(c[k]))] - else: - uc[k] = c[k] - - with ema_scope("Sampling with classifier-free guidance"): - samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, - ddim_steps=ddim_steps, eta=ddim_eta, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=uc, - ) - x_samples_cfg = self.decode_first_stage(samples_cfg) - log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg - return log diff --git a/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/face3d/options/base_options.py b/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/face3d/options/base_options.py deleted file mode 100644 index d8f921d5a43434ae802a55a0fa3889c4b7ab9f6d..0000000000000000000000000000000000000000 --- a/spaces/daddyjin/TalkingFaceGeneration/Demo_TFR_Pirenderer/src/face3d/options/base_options.py +++ /dev/null @@ -1,169 +0,0 @@ -"""This script contains base options for Deep3DFaceRecon_pytorch -""" - -import argparse -import os -from util import util -import numpy as np -import torch -import face3d.models as models -import face3d.data as data - - -class BaseOptions(): - """This class defines options used during both training and test time. - - It also implements several helper functions such as parsing, printing, and saving the options. - It also gathers additional options defined in functions in both dataset class and model class. 
- """ - - def __init__(self, cmd_line=None): - """Reset the class; indicates the class hasn't been initailized""" - self.initialized = False - self.cmd_line = None - if cmd_line is not None: - self.cmd_line = cmd_line.split() - - def initialize(self, parser): - """Define the common options that are used in both training and test.""" - # basic parameters - parser.add_argument('--name', type=str, default='face_recon', help='name of the experiment. It decides where to store samples and models') - parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') - parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') - parser.add_argument('--vis_batch_nums', type=float, default=1, help='batch nums of images for visulization') - parser.add_argument('--eval_batch_nums', type=float, default=float('inf'), help='batch nums of images for evaluation') - parser.add_argument('--use_ddp', type=util.str2bool, nargs='?', const=True, default=True, help='whether use distributed data parallel') - parser.add_argument('--ddp_port', type=str, default='12355', help='ddp port') - parser.add_argument('--display_per_batch', type=util.str2bool, nargs='?', const=True, default=True, help='whether use batch to show losses') - parser.add_argument('--add_image', type=util.str2bool, nargs='?', const=True, default=True, help='whether add image to tensorboard') - parser.add_argument('--world_size', type=int, default=1, help='batch nums of images for evaluation') - - # model parameters - parser.add_argument('--model', type=str, default='facerecon', help='chooses which model to use.') - - # additional parameters - parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') - parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') - parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}') - - self.initialized = True - return parser - - def gather_options(self): - """Initialize our parser with basic options(only once). - Add additional model-specific and dataset-specific options. - These options are defined in the function - in model and dataset classes. 
- """ - if not self.initialized: # check if it has been initialized - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser = self.initialize(parser) - - # get the basic options - if self.cmd_line is None: - opt, _ = parser.parse_known_args() - else: - opt, _ = parser.parse_known_args(self.cmd_line) - - # set cuda visible devices - os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_ids - - # modify model-related parser options - model_name = opt.model - model_option_setter = models.get_option_setter(model_name) - parser = model_option_setter(parser, self.isTrain) - if self.cmd_line is None: - opt, _ = parser.parse_known_args() # parse again with new defaults - else: - opt, _ = parser.parse_known_args(self.cmd_line) # parse again with new defaults - - # modify dataset-related parser options - if opt.dataset_mode: - dataset_name = opt.dataset_mode - dataset_option_setter = data.get_option_setter(dataset_name) - parser = dataset_option_setter(parser, self.isTrain) - - # save and return the parser - self.parser = parser - if self.cmd_line is None: - return parser.parse_args() - else: - return parser.parse_args(self.cmd_line) - - def print_options(self, opt): - """Print and save options - - It will print both current options and default values(if different). - It will save options into a text file / [checkpoints_dir] / opt.txt - """ - message = '' - message += '----------------- Options ---------------\n' - for k, v in sorted(vars(opt).items()): - comment = '' - default = self.parser.get_default(k) - if v != default: - comment = '\t[default: %s]' % str(default) - message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) - message += '----------------- End -------------------' - print(message) - - # save to the disk - expr_dir = os.path.join(opt.checkpoints_dir, opt.name) - util.mkdirs(expr_dir) - file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase)) - try: - with open(file_name, 'wt') as opt_file: - opt_file.write(message) - opt_file.write('\n') - except PermissionError as error: - print("permission error {}".format(error)) - pass - - def parse(self): - """Parse our options, create checkpoints directory suffix, and set up gpu device.""" - opt = self.gather_options() - opt.isTrain = self.isTrain # train or test - - # process opt.suffix - if opt.suffix: - suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else '' - opt.name = opt.name + suffix - - - # set gpu ids - str_ids = opt.gpu_ids.split(',') - gpu_ids = [] - for str_id in str_ids: - id = int(str_id) - if id >= 0: - gpu_ids.append(id) - opt.world_size = len(gpu_ids) - # if len(opt.gpu_ids) > 0: - # torch.cuda.set_device(gpu_ids[0]) - if opt.world_size == 1: - opt.use_ddp = False - - if opt.phase != 'test': - # set continue_train automatically - if opt.pretrained_name is None: - model_dir = os.path.join(opt.checkpoints_dir, opt.name) - else: - model_dir = os.path.join(opt.checkpoints_dir, opt.pretrained_name) - if os.path.isdir(model_dir): - model_pths = [i for i in os.listdir(model_dir) if i.endswith('pth')] - if os.path.isdir(model_dir) and len(model_pths) != 0: - opt.continue_train= True - - # update the latest epoch count - if opt.continue_train: - if opt.epoch == 'latest': - epoch_counts = [int(i.split('.')[0].split('_')[-1]) for i in model_pths if 'latest' not in i] - if len(epoch_counts) != 0: - opt.epoch_count = max(epoch_counts) + 1 - else: - opt.epoch_count = int(opt.epoch) + 1 - - - self.print_options(opt) - self.opt = opt - return self.opt diff --git 
a/spaces/danielpedriniportfolio/AutoDA/pages/08-Machine_Learning.py b/spaces/danielpedriniportfolio/AutoDA/pages/08-Machine_Learning.py deleted file mode 100644 index acf0423f173fa0aeef4894669313fb19170cc7c9..0000000000000000000000000000000000000000 --- a/spaces/danielpedriniportfolio/AutoDA/pages/08-Machine_Learning.py +++ /dev/null @@ -1,99 +0,0 @@ -import pandas as pd -import streamlit as st -import streamlit.components.v1 as components -import os -from pycaret.classification import ClassificationExperiment -import time - - -def train_model(): - s = ClassificationExperiment() - s.setup(df, target=df_target, session_id=123 ) - best = s.compare_models() - s.evaluate_model(best) - pred_holdout = s.predict_model(best) - new_df = df.copy().drop(df_target, axis=1) - s.save_model(best, 'model') - st.session_state.best = best - leaderbord = s.get_leaderboard() - leaderbord.to_csv('leaderbord.csv') - -def show_leaderbord(): - if st.button('Show Leaderboard'): - df = pd.read_csv('leaderbord.csv') - st.write(df) - - -def reload_data(): - st.write("Reloading data...") - df_original = st.session_state["df_original"] - df = df_original.copy() - st.session_state.df = df - del st.session_state['df_target'] - del st.session_state['best'] - st.experimental_rerun() - - - - - -def save_model(model, name): - ClassificationExperiment.save_model(model, name) - - - - -st.set_page_config(layout="wide") -col1, col2, col3 = st.columns([15, 70, 15]) - -with col1: - st.write("") - - -with col2: - if "df" not in st.session_state: - st.warning("Please upload a CSV file") - else: - st.header("Machine Learning App") - if st.button("Reload data"): - reload_data() - df = st.session_state["df"] - st.dataframe(df.head()) - # check if target exists to start model training and alredy encoded - if "df_target" in st.session_state: - st.subheader("Target") - df_target = st.session_state["df_target"] - st.info(df_target) - # check if target is already encoded - if df[df_target].dtype in ["int64"]: - if df.isnull().sum().sum() == 0: - st.success("Data is ready for model training") - # check if model is already trained to show the button or not - if "best" not in st.session_state: - if st.button("Train the Model"): - s = ClassificationExperiment() - s.setup(df, target=df_target, session_id=123 ) - best = s.compare_models() - s.evaluate_model(best) - pred_holdout = s.predict_model(best) - new_df = df.copy().drop(df_target, axis=1) - s.save_model(best, 'model') - st.session_state.best = best - leaderbord = s.get_leaderboard() - st.bar_chart(leaderbord.sort_values('Accuracy', ascending=False), x='Model Name', y='Accuracy', height=600) - st.dataframe(leaderbord) - if "best" in st.session_state: - with open('model.pkl', 'rb') as f: - best_model = f.read() - st.download_button(label='Download the Model', data=best_model, file_name='model.pkl', mime='application/octet-stream') - - else: - st.warning( - "Data is not ready for model training. Still missing values" - ) - else: - st.warning( - "Data is not ready for model training. Target is not encoded" - ) - else: - st.warning("Data is not ready for model training. 
Target is missing") diff --git a/spaces/dawood17/SayBot_Enchancer/CodeFormer/facelib/detection/yolov5face/face_detector.py b/spaces/dawood17/SayBot_Enchancer/CodeFormer/facelib/detection/yolov5face/face_detector.py deleted file mode 100644 index 0103411e27860898fee470895a7cf59d8be2e11a..0000000000000000000000000000000000000000 --- a/spaces/dawood17/SayBot_Enchancer/CodeFormer/facelib/detection/yolov5face/face_detector.py +++ /dev/null @@ -1,142 +0,0 @@ -import copy -import os -from pathlib import Path - -import cv2 -import numpy as np -import torch -from torch import nn - -from facelib.detection.yolov5face.models.common import Conv -from facelib.detection.yolov5face.models.yolo import Model -from facelib.detection.yolov5face.utils.datasets import letterbox -from facelib.detection.yolov5face.utils.general import ( - check_img_size, - non_max_suppression_face, - scale_coords, - scale_coords_landmarks, -) - -IS_HIGH_VERSION = tuple(map(int, torch.__version__.split('+')[0].split('.')[:3])) >= (1, 9, 0) - - -def isListempty(inList): - if isinstance(inList, list): # Is a list - return all(map(isListempty, inList)) - return False # Not a list - -class YoloDetector: - def __init__( - self, - config_name, - min_face=10, - target_size=None, - device='cuda', - ): - """ - config_name: name of .yaml config with network configuration from models/ folder. - min_face : minimal face size in pixels. - target_size : target size of smaller image axis (choose lower for faster work). e.g. 480, 720, 1080. - None for original resolution. - """ - self._class_path = Path(__file__).parent.absolute() - self.target_size = target_size - self.min_face = min_face - self.detector = Model(cfg=config_name) - self.device = device - - - def _preprocess(self, imgs): - """ - Preprocessing image before passing through the network. Resize and conversion to torch tensor. - """ - pp_imgs = [] - for img in imgs: - h0, w0 = img.shape[:2] # orig hw - if self.target_size: - r = self.target_size / min(h0, w0) # resize image to img_size - if r < 1: - img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=cv2.INTER_LINEAR) - - imgsz = check_img_size(max(img.shape[:2]), s=self.detector.stride.max()) # check img_size - img = letterbox(img, new_shape=imgsz)[0] - pp_imgs.append(img) - pp_imgs = np.array(pp_imgs) - pp_imgs = pp_imgs.transpose(0, 3, 1, 2) - pp_imgs = torch.from_numpy(pp_imgs).to(self.device) - pp_imgs = pp_imgs.float() # uint8 to fp16/32 - return pp_imgs / 255.0 # 0 - 255 to 0.0 - 1.0 - - def _postprocess(self, imgs, origimgs, pred, conf_thres, iou_thres): - """ - Postprocessing of raw pytorch model output. - Returns: - bboxes: list of arrays with 4 coordinates of bounding boxes with format x1,y1,x2,y2. - points: list of arrays with coordinates of 5 facial keypoints (eyes, nose, lips corners). 
- """ - bboxes = [[] for _ in range(len(origimgs))] - landmarks = [[] for _ in range(len(origimgs))] - - pred = non_max_suppression_face(pred, conf_thres, iou_thres) - - for image_id, origimg in enumerate(origimgs): - img_shape = origimg.shape - image_height, image_width = img_shape[:2] - gn = torch.tensor(img_shape)[[1, 0, 1, 0]] # normalization gain whwh - gn_lks = torch.tensor(img_shape)[[1, 0, 1, 0, 1, 0, 1, 0, 1, 0]] # normalization gain landmarks - det = pred[image_id].cpu() - scale_coords(imgs[image_id].shape[1:], det[:, :4], img_shape).round() - scale_coords_landmarks(imgs[image_id].shape[1:], det[:, 5:15], img_shape).round() - - for j in range(det.size()[0]): - box = (det[j, :4].view(1, 4) / gn).view(-1).tolist() - box = list( - map(int, [box[0] * image_width, box[1] * image_height, box[2] * image_width, box[3] * image_height]) - ) - if box[3] - box[1] < self.min_face: - continue - lm = (det[j, 5:15].view(1, 10) / gn_lks).view(-1).tolist() - lm = list(map(int, [i * image_width if j % 2 == 0 else i * image_height for j, i in enumerate(lm)])) - lm = [lm[i : i + 2] for i in range(0, len(lm), 2)] - bboxes[image_id].append(box) - landmarks[image_id].append(lm) - return bboxes, landmarks - - def detect_faces(self, imgs, conf_thres=0.7, iou_thres=0.5): - """ - Get bbox coordinates and keypoints of faces on original image. - Params: - imgs: image or list of images to detect faces on with BGR order (convert to RGB order for inference) - conf_thres: confidence threshold for each prediction - iou_thres: threshold for NMS (filter of intersecting bboxes) - Returns: - bboxes: list of arrays with 4 coordinates of bounding boxes with format x1,y1,x2,y2. - points: list of arrays with coordinates of 5 facial keypoints (eyes, nose, lips corners). - """ - # Pass input images through face detector - images = imgs if isinstance(imgs, list) else [imgs] - images = [cv2.cvtColor(img, cv2.COLOR_BGR2RGB) for img in images] - origimgs = copy.deepcopy(images) - - images = self._preprocess(images) - - if IS_HIGH_VERSION: - with torch.inference_mode(): # for pytorch>=1.9 - pred = self.detector(images)[0] - else: - with torch.no_grad(): # for pytorch<1.9 - pred = self.detector(images)[0] - - bboxes, points = self._postprocess(images, origimgs, pred, conf_thres, iou_thres) - - # return bboxes, points - if not isListempty(points): - bboxes = np.array(bboxes).reshape(-1,4) - points = np.array(points).reshape(-1,10) - padding = bboxes[:,0].reshape(-1,1) - return np.concatenate((bboxes, padding, points), axis=1) - else: - return None - - def __call__(self, *args): - return self.predict(*args) diff --git a/spaces/dawood17/SayBot_Enchancer/CodeFormer/facelib/detection/yolov5face/models/common.py b/spaces/dawood17/SayBot_Enchancer/CodeFormer/facelib/detection/yolov5face/models/common.py deleted file mode 100644 index 497a00444c4c59725001993a63fe4617e9d323c8..0000000000000000000000000000000000000000 --- a/spaces/dawood17/SayBot_Enchancer/CodeFormer/facelib/detection/yolov5face/models/common.py +++ /dev/null @@ -1,299 +0,0 @@ -# This file contains modules common to various models - -import math - -import numpy as np -import torch -from torch import nn - -from facelib.detection.yolov5face.utils.datasets import letterbox -from facelib.detection.yolov5face.utils.general import ( - make_divisible, - non_max_suppression, - scale_coords, - xyxy2xywh, -) - - -def autopad(k, p=None): # kernel, padding - # Pad to 'same' - if p is None: - p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad - return p - - -def 
channel_shuffle(x, groups): - batchsize, num_channels, height, width = x.data.size() - channels_per_group = torch.div(num_channels, groups, rounding_mode="trunc") - - # reshape - x = x.view(batchsize, groups, channels_per_group, height, width) - x = torch.transpose(x, 1, 2).contiguous() - - # flatten - return x.view(batchsize, -1, height, width) - - -def DWConv(c1, c2, k=1, s=1, act=True): - # Depthwise convolution - return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) - - -class Conv(nn.Module): - # Standard convolution - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__() - self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) - self.bn = nn.BatchNorm2d(c2) - self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) - - def forward(self, x): - return self.act(self.bn(self.conv(x))) - - def fuseforward(self, x): - return self.act(self.conv(x)) - - -class StemBlock(nn.Module): - def __init__(self, c1, c2, k=3, s=2, p=None, g=1, act=True): - super().__init__() - self.stem_1 = Conv(c1, c2, k, s, p, g, act) - self.stem_2a = Conv(c2, c2 // 2, 1, 1, 0) - self.stem_2b = Conv(c2 // 2, c2, 3, 2, 1) - self.stem_2p = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True) - self.stem_3 = Conv(c2 * 2, c2, 1, 1, 0) - - def forward(self, x): - stem_1_out = self.stem_1(x) - stem_2a_out = self.stem_2a(stem_1_out) - stem_2b_out = self.stem_2b(stem_2a_out) - stem_2p_out = self.stem_2p(stem_1_out) - return self.stem_3(torch.cat((stem_2b_out, stem_2p_out), 1)) - - -class Bottleneck(nn.Module): - # Standard bottleneck - def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_, c2, 3, 1, g=g) - self.add = shortcut and c1 == c2 - - def forward(self, x): - return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) - - -class BottleneckCSP(nn.Module): - # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) - self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) - self.cv4 = Conv(2 * c_, c2, 1, 1) - self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) - self.act = nn.LeakyReLU(0.1, inplace=True) - self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) - - def forward(self, x): - y1 = self.cv3(self.m(self.cv1(x))) - y2 = self.cv2(x) - return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1)))) - - -class C3(nn.Module): - # CSP Bottleneck with 3 convolutions - def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion - super().__init__() - c_ = int(c2 * e) # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c1, c_, 1, 1) - self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2) - self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) - - def forward(self, x): - return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1)) - - -class ShuffleV2Block(nn.Module): - def __init__(self, inp, oup, stride): - super().__init__() - - if not 1 <= stride <= 3: - raise ValueError("illegal stride value") - self.stride = stride - - 
branch_features = oup // 2 - - if self.stride > 1: - self.branch1 = nn.Sequential( - self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1), - nn.BatchNorm2d(inp), - nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False), - nn.BatchNorm2d(branch_features), - nn.SiLU(), - ) - else: - self.branch1 = nn.Sequential() - - self.branch2 = nn.Sequential( - nn.Conv2d( - inp if (self.stride > 1) else branch_features, - branch_features, - kernel_size=1, - stride=1, - padding=0, - bias=False, - ), - nn.BatchNorm2d(branch_features), - nn.SiLU(), - self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1), - nn.BatchNorm2d(branch_features), - nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False), - nn.BatchNorm2d(branch_features), - nn.SiLU(), - ) - - @staticmethod - def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False): - return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i) - - def forward(self, x): - if self.stride == 1: - x1, x2 = x.chunk(2, dim=1) - out = torch.cat((x1, self.branch2(x2)), dim=1) - else: - out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) - out = channel_shuffle(out, 2) - return out - - -class SPP(nn.Module): - # Spatial pyramid pooling layer used in YOLOv3-SPP - def __init__(self, c1, c2, k=(5, 9, 13)): - super().__init__() - c_ = c1 // 2 # hidden channels - self.cv1 = Conv(c1, c_, 1, 1) - self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) - self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) - - def forward(self, x): - x = self.cv1(x) - return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) - - -class Focus(nn.Module): - # Focus wh information into c-space - def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups - super().__init__() - self.conv = Conv(c1 * 4, c2, k, s, p, g, act) - - def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) - return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) - - -class Concat(nn.Module): - # Concatenate a list of tensors along dimension - def __init__(self, dimension=1): - super().__init__() - self.d = dimension - - def forward(self, x): - return torch.cat(x, self.d) - - -class NMS(nn.Module): - # Non-Maximum Suppression (NMS) module - conf = 0.25 # confidence threshold - iou = 0.45 # IoU threshold - classes = None # (optional list) filter by class - - def forward(self, x): - return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) - - -class AutoShape(nn.Module): - # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS - img_size = 640 # inference size (pixels) - conf = 0.25 # NMS confidence threshold - iou = 0.45 # NMS IoU threshold - classes = None # (optional list) filter by class - - def __init__(self, model): - super().__init__() - self.model = model.eval() - - def autoshape(self): - print("autoShape already enabled, skipping... ") # model already converted to model.autoshape() - return self - - def forward(self, imgs, size=640, augment=False, profile=False): - # Inference from various sources. 
For height=720, width=1280, RGB images example inputs are: - # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(720,1280,3) - # PIL: = Image.open('image.jpg') # HWC x(720,1280,3) - # numpy: = np.zeros((720,1280,3)) # HWC - # torch: = torch.zeros(16,3,720,1280) # BCHW - # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images - - p = next(self.model.parameters()) # for device and type - if isinstance(imgs, torch.Tensor): # torch - return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference - - # Pre-process - n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images - shape0, shape1 = [], [] # image and inference shapes - for i, im in enumerate(imgs): - im = np.array(im) # to numpy - if im.shape[0] < 5: # image in CHW - im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) - im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input - s = im.shape[:2] # HWC - shape0.append(s) # image shape - g = size / max(s) # gain - shape1.append([y * g for y in s]) - imgs[i] = im # update - shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape - x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad - x = np.stack(x, 0) if n > 1 else x[0][None] # stack - x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW - x = torch.from_numpy(x).to(p.device).type_as(p) / 255.0 # uint8 to fp16/32 - - # Inference - with torch.no_grad(): - y = self.model(x, augment, profile)[0] # forward - y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS - - # Post-process - for i in range(n): - scale_coords(shape1, y[i][:, :4], shape0[i]) - - return Detections(imgs, y, self.names) - - -class Detections: - # detections class for YOLOv5 inference results - def __init__(self, imgs, pred, names=None): - super().__init__() - d = pred[0].device # device - gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1.0, 1.0], device=d) for im in imgs] # normalizations - self.imgs = imgs # list of images as numpy arrays - self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) - self.names = names # class names - self.xyxy = pred # xyxy pixels - self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels - self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized - self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized - self.n = len(self.pred) - - def __len__(self): - return self.n - - def tolist(self): - # return a list of Detections objects, i.e. 
'for result in results.tolist():' - x = [Detections([self.imgs[i]], [self.pred[i]], self.names) for i in range(self.n)] - for d in x: - for k in ["imgs", "pred", "xyxy", "xyxyn", "xywh", "xywhn"]: - setattr(d, k, getattr(d, k)[0]) # pop out of list - return x diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/contourpy/chunk.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/contourpy/chunk.py deleted file mode 100644 index 076cbc4370b4471c2074cade279250a3ebec9041..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/contourpy/chunk.py +++ /dev/null @@ -1,89 +0,0 @@ -from __future__ import annotations - -import math - - -def calc_chunk_sizes( - chunk_size: int | tuple[int, int] | None, - chunk_count: int | tuple[int, int] | None, - total_chunk_count: int | None, - ny: int, - nx: int, -) -> tuple[int, int]: - """Calculate chunk sizes. - - Args: - chunk_size (int or tuple(int, int), optional): Chunk size in (y, x) directions, or the same - size in both directions if only one is specified. - chunk_count (int or tuple(int, int), optional): Chunk count in (y, x) directions, or the - same count in both irections if only one is specified. - total_chunk_count (int, optional): Total number of chunks. - ny (int): Number of grid points in y-direction. - nx (int): Number of grid points in x-direction. - - Return: - tuple(int, int): Chunk sizes (y_chunk_size, x_chunk_size). - - Note: - A maximum of one of ``chunk_size``, ``chunk_count`` and ``total_chunk_count`` may be - specified. - """ - if sum([chunk_size is not None, chunk_count is not None, total_chunk_count is not None]) > 1: - raise ValueError("Only one of chunk_size, chunk_count and total_chunk_count should be set") - - if total_chunk_count is not None: - max_chunk_count = (nx-1)*(ny-1) - total_chunk_count = min(max(total_chunk_count, 1), max_chunk_count) - if total_chunk_count == 1: - chunk_size = 0 - elif total_chunk_count == max_chunk_count: - chunk_size = (1, 1) - else: - factors = two_factors(total_chunk_count) - if ny > nx: - chunk_count = factors - else: - chunk_count = (factors[1], factors[0]) - - if chunk_count is not None: - if isinstance(chunk_count, tuple): - y_chunk_count, x_chunk_count = chunk_count - else: - y_chunk_count = x_chunk_count = chunk_count - x_chunk_count = min(max(x_chunk_count, 1), nx-1) - y_chunk_count = min(max(y_chunk_count, 1), ny-1) - chunk_size = (math.ceil((ny-1) / y_chunk_count), math.ceil((nx-1) / x_chunk_count)) - - if chunk_size is None: - y_chunk_size = x_chunk_size = 0 - elif isinstance(chunk_size, tuple): - y_chunk_size, x_chunk_size = chunk_size - else: - y_chunk_size = x_chunk_size = chunk_size - - if x_chunk_size < 0 or y_chunk_size < 0: - raise ValueError("chunk_size cannot be negative") - - return y_chunk_size, x_chunk_size - - -def two_factors(n: int) -> tuple[int, int]: - """Split an integer into two integer factors. - - The two factors will be as close as possible to the sqrt of n, and are returned in decreasing - order. Worst case returns (n, 1). - - Args: - n (int): The integer to factorize. - - Return: - tuple(int, int): The two factors of n, in decreasing order. 
- """ - i = math.ceil(math.sqrt(n)) - while n % i != 0: - i -= 1 - j = n // i - if i > j: - return i, j - else: - return j, i diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-201f0338.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-201f0338.js deleted file mode 100644 index ae3d3fc19bfa2ab5b388ecbd6e3783e509821512..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-201f0338.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as P,e as Q,s as R,m as U,o as H,t as V,g as F,Y as J,h as I,j as O,p as K,x as Y,n as L,k as T,B as ae,C as se,am as _e,F as j,G as N,w as d,u as v,H as S,V as X,ae as Z,N as E,O as q,Q as y,R as p,r as z,v as A,T as D,E as C,P as fe}from"./index-9e76ffee.js";import{B as x}from"./Button-30a08c0b.js";import{I as $}from"./Info-77722665.js";function ce(n){let e,l,i,t,c,o,m;return{c(){e=U("label"),l=U("input"),i=H(),t=U("span"),c=V(n[2]),l.disabled=n[1],F(l,"type","checkbox"),F(l,"name","test"),F(l,"data-testid","checkbox"),F(l,"class","svelte-1ojmf70"),F(t,"class","ml-2 svelte-1ojmf70"),F(e,"class","svelte-1ojmf70"),J(e,"disabled",n[1])},m(u,f){I(u,e,f),O(e,l),l.checked=n[0],O(e,i),O(e,t),O(t,c),o||(m=[K(l,"change",n[5]),K(l,"input",n[6])],o=!0)},p(u,[f]){f&2&&(l.disabled=u[1]),f&1&&(l.checked=u[0]),f&4&&Y(c,u[2]),f&2&&J(e,"disabled",u[1])},i:L,o:L,d(u){u&&T(e),o=!1,ae(m)}}}function oe(n,e,l){let{value:i}=e,{value_is_output:t=!1}=e,{disabled:c=!1}=e,{label:o}=e;const m=se();function u(){m("change",i),t||m("input")}_e(()=>{l(4,t=!1)});function f(){i=this.checked,l(0,i)}const a=r=>{l(0,i=r.currentTarget.checked),m("select",{index:0,value:o,selected:r.currentTarget.checked})};return n.$$set=r=>{"value"in r&&l(0,i=r.value),"value_is_output"in r&&l(4,t=r.value_is_output),"disabled"in r&&l(1,c=r.disabled),"label"in r&&l(2,o=r.label)},n.$$.update=()=>{n.$$.dirty&1&&u()},[i,c,o,m,t,f,a]}let ee=class extends P{constructor(e){super(),Q(this,e,oe,ce,R,{value:0,value_is_output:4,disabled:1,label:2})}};function M(n){let e,l;return e=new $({props:{$$slots:{default:[me]},$$scope:{ctx:n}}}),{c(){j(e.$$.fragment)},m(i,t){N(e,i,t),l=!0},p(i,t){const c={};t&65600&&(c.$$scope={dirty:t,ctx:i}),e.$set(c)},i(i){l||(d(e.$$.fragment,i),l=!0)},o(i){v(e.$$.fragment,i),l=!1},d(i){S(e,i)}}}function me(n){let e;return{c(){e=V(n[6])},m(l,i){I(l,e,i)},p(l,i){i&64&&Y(e,l[6])},d(l){l&&T(e)}}}function be(n){let e,l,i,t,c,o,m;const u=[n[10]];let f={};for(let s=0;sq(t,"value",r)),E.push(()=>q(t,"value_is_output",w)),t.$on("change",n[13]),t.$on("input",n[14]),t.$on("select",n[15]),{c(){j(e.$$.fragment),l=H(),a&&a.c(),i=H(),j(t.$$.fragment)},m(s,h){N(e,s,h),I(s,l,h),a&&a.m(s,h),I(s,i,h),N(t,s,h),m=!0},p(s,h){const B=h&1024?y(u,[p(s[10])]):{};e.$set(B),s[6]?a?(a.p(s,h),h&64&&d(a,1)):(a=M(s),a.c(),d(a,1),a.m(i.parentNode,i)):a&&(z(),v(a,1,1,()=>{a=null}),A());const k={};h&32&&(k.label=s[5]),!c&&h&1&&(c=!0,k.value=s[0],D(()=>c=!1)),!o&&h&2&&(o=!0,k.value_is_output=s[1],D(()=>o=!1)),t.$set(k)},i(s){m||(d(e.$$.fragment,s),d(a),d(t.$$.fragment,s),m=!0)},o(s){v(e.$$.fragment,s),v(a),v(t.$$.fragment,s),m=!1},d(s){s&&(T(l),T(i)),S(e,s),a&&a.d(s),S(t,s)}}}function he(n){let e,l;return e=new x({props:{visible:n[4],elem_id:n[2],elem_classes:n[3],container:n[7],scale:n[8],min_width:n[9],$$slots:{default:[be]},$$scope:{ctx:n}}}),{c(){j(e.$$.fragment)},m(i,t){N(e,i,t),l=!0},p(i,[t]){const 
c={};t&16&&(c.visible=i[4]),t&4&&(c.elem_id=i[2]),t&8&&(c.elem_classes=i[3]),t&128&&(c.container=i[7]),t&256&&(c.scale=i[8]),t&512&&(c.min_width=i[9]),t&66659&&(c.$$scope={dirty:t,ctx:i}),e.$set(c)},i(i){l||(d(e.$$.fragment,i),l=!0)},o(i){v(e.$$.fragment,i),l=!1},d(i){S(e,i)}}}function re(n,e,l){let{elem_id:i=""}=e,{elem_classes:t=[]}=e,{visible:c=!0}=e,{value:o=!1}=e,{value_is_output:m=!1}=e,{label:u="Checkbox"}=e,{info:f=void 0}=e,{container:a=!0}=e,{scale:r=null}=e,{min_width:w=void 0}=e,{loading_status:g}=e;function s(_){o=_,l(0,o)}function h(_){m=_,l(1,m)}function B(_){C.call(this,n,_)}function k(_){C.call(this,n,_)}function G(_){C.call(this,n,_)}return n.$$set=_=>{"elem_id"in _&&l(2,i=_.elem_id),"elem_classes"in _&&l(3,t=_.elem_classes),"visible"in _&&l(4,c=_.visible),"value"in _&&l(0,o=_.value),"value_is_output"in _&&l(1,m=_.value_is_output),"label"in _&&l(5,u=_.label),"info"in _&&l(6,f=_.info),"container"in _&&l(7,a=_.container),"scale"in _&&l(8,r=_.scale),"min_width"in _&&l(9,w=_.min_width),"loading_status"in _&&l(10,g=_.loading_status)},[o,m,i,t,c,u,f,a,r,w,g,s,h,B,k,G]}class de extends P{constructor(e){super(),Q(this,e,re,he,R,{elem_id:2,elem_classes:3,visible:4,value:0,value_is_output:1,label:5,info:6,container:7,scale:8,min_width:9,loading_status:10})}}function W(n){let e,l;return e=new $({props:{$$slots:{default:[ge]},$$scope:{ctx:n}}}),{c(){j(e.$$.fragment)},m(i,t){N(e,i,t),l=!0},p(i,t){const c={};t&65600&&(c.$$scope={dirty:t,ctx:i}),e.$set(c)},i(i){l||(d(e.$$.fragment,i),l=!0)},o(i){v(e.$$.fragment,i),l=!1},d(i){S(e,i)}}}function ge(n){let e;return{c(){e=V(n[6])},m(l,i){I(l,e,i)},p(l,i){i&64&&Y(e,l[6])},d(l){l&&T(e)}}}function ve(n){let e,l,i,t,c,o,m;const u=[n[10]];let f={};for(let s=0;sq(t,"value",r)),E.push(()=>q(t,"value_is_output",w)),t.$on("change",n[13]),t.$on("input",n[14]),t.$on("select",n[15]),{c(){j(e.$$.fragment),l=H(),a&&a.c(),i=H(),j(t.$$.fragment)},m(s,h){N(e,s,h),I(s,l,h),a&&a.m(s,h),I(s,i,h),N(t,s,h),m=!0},p(s,h){const B=h&1024?y(u,[p(s[10])]):{};e.$set(B),s[6]?a?(a.p(s,h),h&64&&d(a,1)):(a=W(s),a.c(),d(a,1),a.m(i.parentNode,i)):a&&(z(),v(a,1,1,()=>{a=null}),A());const k={};h&32&&(k.label=s[5]),!c&&h&1&&(c=!0,k.value=s[0],D(()=>c=!1)),!o&&h&2&&(o=!0,k.value_is_output=s[1],D(()=>o=!1)),t.$set(k)},i(s){m||(d(e.$$.fragment,s),d(a),d(t.$$.fragment,s),m=!0)},o(s){v(e.$$.fragment,s),v(a),v(t.$$.fragment,s),m=!1},d(s){s&&(T(l),T(i)),S(e,s),a&&a.d(s),S(t,s)}}}function ke(n){let e,l;return e=new x({props:{visible:n[4],elem_id:n[2],elem_classes:n[3],container:n[7],scale:n[8],min_width:n[9],$$slots:{default:[ve]},$$scope:{ctx:n}}}),{c(){j(e.$$.fragment)},m(i,t){N(e,i,t),l=!0},p(i,[t]){const c={};t&16&&(c.visible=i[4]),t&4&&(c.elem_id=i[2]),t&8&&(c.elem_classes=i[3]),t&128&&(c.container=i[7]),t&256&&(c.scale=i[8]),t&512&&(c.min_width=i[9]),t&66659&&(c.$$scope={dirty:t,ctx:i}),e.$set(c)},i(i){l||(d(e.$$.fragment,i),l=!0)},o(i){v(e.$$.fragment,i),l=!1},d(i){S(e,i)}}}function we(n,e,l){let{elem_id:i=""}=e,{elem_classes:t=[]}=e,{visible:c=!0}=e,{value:o=!1}=e,{value_is_output:m=!1}=e,{label:u="Checkbox"}=e,{info:f=void 0}=e,{container:a=!0}=e,{scale:r=null}=e,{min_width:w=void 0}=e,{loading_status:g}=e;function s(_){o=_,l(0,o)}function h(_){m=_,l(1,m)}function B(_){C.call(this,n,_)}function k(_){C.call(this,n,_)}function G(_){C.call(this,n,_)}return n.$$set=_=>{"elem_id"in _&&l(2,i=_.elem_id),"elem_classes"in _&&l(3,t=_.elem_classes),"visible"in _&&l(4,c=_.visible),"value"in _&&l(0,o=_.value),"value_is_output"in _&&l(1,m=_.value_is_output),"label"in 
_&&l(5,u=_.label),"info"in _&&l(6,f=_.info),"container"in _&&l(7,a=_.container),"scale"in _&&l(8,r=_.scale),"min_width"in _&&l(9,w=_.min_width),"loading_status"in _&&l(10,g=_.loading_status)},[o,m,i,t,c,u,f,a,r,w,g,s,h,B,k,G]}class Ce extends P{constructor(e){super(),Q(this,e,we,ke,R,{elem_id:2,elem_classes:3,visible:4,value:0,value_is_output:1,label:5,info:6,container:7,scale:8,min_width:9,loading_status:10})}}function je(n){let e,l,i,t;function c(u){n[17](u)}function o(u){n[18](u)}let m={elem_id:n[2],elem_classes:n[3],visible:n[4],label:n[5],info:n[6],container:n[8],scale:n[9],min_width:n[10],loading_status:n[11]};return n[0]!==void 0&&(m.value=n[0]),n[1]!==void 0&&(m.value_is_output=n[1]),e=new Ce({props:m}),E.push(()=>q(e,"value",c)),E.push(()=>q(e,"value_is_output",o)),e.$on("change",n[19]),e.$on("input",n[20]),e.$on("select",n[21]),{c(){j(e.$$.fragment)},m(u,f){N(e,u,f),t=!0},p(u,f){const a={};f&4&&(a.elem_id=u[2]),f&8&&(a.elem_classes=u[3]),f&16&&(a.visible=u[4]),f&32&&(a.label=u[5]),f&64&&(a.info=u[6]),f&256&&(a.container=u[8]),f&512&&(a.scale=u[9]),f&1024&&(a.min_width=u[10]),f&2048&&(a.loading_status=u[11]),!l&&f&1&&(l=!0,a.value=u[0],D(()=>l=!1)),!i&&f&2&&(i=!0,a.value_is_output=u[1],D(()=>i=!1)),e.$set(a)},i(u){t||(d(e.$$.fragment,u),t=!0)},o(u){v(e.$$.fragment,u),t=!1},d(u){S(e,u)}}}function Ne(n){let e,l,i,t;function c(u){n[12](u)}function o(u){n[13](u)}let m={elem_id:n[2],elem_classes:n[3],visible:n[4],label:n[5],info:n[6],container:n[8],scale:n[9],min_width:n[10],loading_status:n[11]};return n[0]!==void 0&&(m.value=n[0]),n[1]!==void 0&&(m.value_is_output=n[1]),e=new de({props:m}),E.push(()=>q(e,"value",c)),E.push(()=>q(e,"value_is_output",o)),e.$on("change",n[14]),e.$on("input",n[15]),e.$on("select",n[16]),{c(){j(e.$$.fragment)},m(u,f){N(e,u,f),t=!0},p(u,f){const a={};f&4&&(a.elem_id=u[2]),f&8&&(a.elem_classes=u[3]),f&16&&(a.visible=u[4]),f&32&&(a.label=u[5]),f&64&&(a.info=u[6]),f&256&&(a.container=u[8]),f&512&&(a.scale=u[9]),f&1024&&(a.min_width=u[10]),f&2048&&(a.loading_status=u[11]),!l&&f&1&&(l=!0,a.value=u[0],D(()=>l=!1)),!i&&f&2&&(i=!0,a.value_is_output=u[1],D(()=>i=!1)),e.$set(a)},i(u){t||(d(e.$$.fragment,u),t=!0)},o(u){v(e.$$.fragment,u),t=!1},d(u){S(e,u)}}}function Se(n){let e,l,i,t;const c=[Ne,je],o=[];function m(u,f){return u[7]==="static"?0:1}return e=m(n),l=o[e]=c[e](n),{c(){l.c(),i=fe()},m(u,f){o[e].m(u,f),I(u,i,f),t=!0},p(u,[f]){let a=e;e=m(u),e===a?o[e].p(u,f):(z(),v(o[a],1,1,()=>{o[a]=null}),A(),l=o[e],l?l.p(u,f):(l=o[e]=c[e](u),l.c()),d(l,1),l.m(i.parentNode,i))},i(u){t||(d(l),t=!0)},o(u){v(l),t=!1},d(u){u&&T(i),o[e].d(u)}}}function Be(n,e,l){let{elem_id:i=""}=e,{elem_classes:t=[]}=e,{visible:c=!0}=e,{value:o=!1}=e,{value_is_output:m=!1}=e,{label:u="Checkbox"}=e,{info:f=void 0}=e,{mode:a}=e,{container:r=!0}=e,{scale:w=null}=e,{min_width:g=void 0}=e,{loading_status:s}=e;function h(b){o=b,l(0,o)}function B(b){m=b,l(1,m)}function k(b){C.call(this,n,b)}function G(b){C.call(this,n,b)}function _(b){C.call(this,n,b)}function ne(b){o=b,l(0,o)}function le(b){m=b,l(1,m)}function ie(b){C.call(this,n,b)}function te(b){C.call(this,n,b)}function ue(b){C.call(this,n,b)}return n.$$set=b=>{"elem_id"in b&&l(2,i=b.elem_id),"elem_classes"in b&&l(3,t=b.elem_classes),"visible"in b&&l(4,c=b.visible),"value"in b&&l(0,o=b.value),"value_is_output"in b&&l(1,m=b.value_is_output),"label"in b&&l(5,u=b.label),"info"in b&&l(6,f=b.info),"mode"in b&&l(7,a=b.mode),"container"in b&&l(8,r=b.container),"scale"in b&&l(9,w=b.scale),"min_width"in b&&l(10,g=b.min_width),"loading_status"in 
b&&l(11,s=b.loading_status)},[o,m,i,t,c,u,f,a,r,w,g,s,h,B,k,G,_,ne,le,ie,te,ue]}class Ie extends P{constructor(e){super(),Q(this,e,Be,Se,R,{elem_id:2,elem_classes:3,visible:4,value:0,value_is_output:1,label:5,info:6,mode:7,container:8,scale:9,min_width:10,loading_status:11})}}const Fe=Ie,Ge=["static","dynamic"];export{Fe as Component,Ge as modes}; -//# sourceMappingURL=index-201f0338.js.map diff --git a/spaces/declare-lab/tango/audioldm/latent_diffusion/ddpm.py b/spaces/declare-lab/tango/audioldm/latent_diffusion/ddpm.py deleted file mode 100644 index ffca031c27d413698adee5a58547b7d0ea4069c3..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/audioldm/latent_diffusion/ddpm.py +++ /dev/null @@ -1,441 +0,0 @@ -""" -wild mixture of -https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py -https://github.com/CompVis/taming-transformers --- merci -""" -import sys -import os - -import torch -import torch.nn as nn -import numpy as np -from contextlib import contextmanager -from functools import partial -from tqdm import tqdm - -from audioldm.utils import exists, default, count_params, instantiate_from_config -from audioldm.latent_diffusion.ema import LitEma -from audioldm.latent_diffusion.util import ( - make_beta_schedule, - extract_into_tensor, - noise_like, -) -import soundfile as sf -import os - - -__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -def uniform_on_device(r1, r2, shape, device): - return (r1 - r2) * torch.rand(*shape, device=device) + r2 - - -class DiffusionWrapper(nn.Module): - def __init__(self, diff_model_config, conditioning_key): - super().__init__() - self.diffusion_model = instantiate_from_config(diff_model_config) - self.conditioning_key = conditioning_key - assert self.conditioning_key in [ - None, - "concat", - "crossattn", - "hybrid", - "adm", - "film", - ] - - def forward( - self, x, t, c_concat: list = None, c_crossattn: list = None, c_film: list = None - ): - x = x.contiguous() - t = t.contiguous() - - if self.conditioning_key is None: - out = self.diffusion_model(x, t) - elif self.conditioning_key == "concat": - xc = torch.cat([x] + c_concat, dim=1) - out = self.diffusion_model(xc, t) - elif self.conditioning_key == "crossattn": - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(x, t, context=cc) - elif self.conditioning_key == "hybrid": - xc = torch.cat([x] + c_concat, dim=1) - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(xc, t, context=cc) - elif ( - self.conditioning_key == "film" - ): # The condition is assumed to be a global token, which wil pass through a linear layer and added with the time embedding for the FILM - cc = c_film[0].squeeze(1) # only has one token - out = self.diffusion_model(x, t, y=cc) - elif self.conditioning_key == "adm": - cc = c_crossattn[0] - out = self.diffusion_model(x, t, y=cc) - else: - raise NotImplementedError() - - return out - - -class DDPM(nn.Module): - # classic DDPM with Gaussian diffusion, in image space - def __init__( - self, - unet_config, - timesteps=1000, - beta_schedule="linear", - loss_type="l2", - ckpt_path=None, - ignore_keys=[], - 
load_only_unet=False, - monitor="val/loss", - use_ema=True, - first_stage_key="image", - latent_t_size=256, - latent_f_size=16, - channels=3, - log_every_t=100, - clip_denoised=True, - linear_start=1e-4, - linear_end=2e-2, - cosine_s=8e-3, - given_betas=None, - original_elbo_weight=0.0, - v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta - l_simple_weight=1.0, - conditioning_key=None, - parameterization="eps", # all assuming fixed variance schedules - scheduler_config=None, - use_positional_encodings=False, - learn_logvar=False, - logvar_init=0.0, - ): - super().__init__() - assert parameterization in [ - "eps", - "x0", - ], 'currently only supporting "eps" and "x0"' - self.parameterization = parameterization - self.state = None - # print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") - self.cond_stage_model = None - self.clip_denoised = clip_denoised - self.log_every_t = log_every_t - self.first_stage_key = first_stage_key - - self.latent_t_size = latent_t_size - self.latent_f_size = latent_f_size - - self.channels = channels - self.use_positional_encodings = use_positional_encodings - self.model = DiffusionWrapper(unet_config, conditioning_key) - count_params(self.model, verbose=True) - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self.model) - # print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - self.use_scheduler = scheduler_config is not None - if self.use_scheduler: - self.scheduler_config = scheduler_config - - self.v_posterior = v_posterior - self.original_elbo_weight = original_elbo_weight - self.l_simple_weight = l_simple_weight - - if monitor is not None: - self.monitor = monitor - - self.register_schedule( - given_betas=given_betas, - beta_schedule=beta_schedule, - timesteps=timesteps, - linear_start=linear_start, - linear_end=linear_end, - cosine_s=cosine_s, - ) - - self.loss_type = loss_type - - self.learn_logvar = learn_logvar - self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) - if self.learn_logvar: - self.logvar = nn.Parameter(self.logvar, requires_grad=True) - else: - self.logvar = nn.Parameter(self.logvar, requires_grad=False) - - self.logger_save_dir = None - self.logger_project = None - self.logger_version = None - self.label_indices_total = None - # To avoid the system cannot find metric value for checkpoint - self.metrics_buffer = { - "val/kullback_leibler_divergence_sigmoid": 15.0, - "val/kullback_leibler_divergence_softmax": 10.0, - "val/psnr": 0.0, - "val/ssim": 0.0, - "val/inception_score_mean": 1.0, - "val/inception_score_std": 0.0, - "val/kernel_inception_distance_mean": 0.0, - "val/kernel_inception_distance_std": 0.0, - "val/frechet_inception_distance": 133.0, - "val/frechet_audio_distance": 32.0, - } - self.initial_learning_rate = None - - def get_log_dir(self): - if ( - self.logger_save_dir is None - and self.logger_project is None - and self.logger_version is None - ): - return os.path.join( - self.logger.save_dir, self.logger._project, self.logger.version - ) - else: - return os.path.join( - self.logger_save_dir, self.logger_project, self.logger_version - ) - - def set_log_dir(self, save_dir, project, version): - self.logger_save_dir = save_dir - self.logger_project = project - self.logger_version = version - - def register_schedule( - self, - given_betas=None, - beta_schedule="linear", - timesteps=1000, - linear_start=1e-4, - linear_end=2e-2, - cosine_s=8e-3, - ): - if exists(given_betas): - betas = 
given_betas - else: - betas = make_beta_schedule( - beta_schedule, - timesteps, - linear_start=linear_start, - linear_end=linear_end, - cosine_s=cosine_s, - ) - alphas = 1.0 - betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) - - (timesteps,) = betas.shape - self.num_timesteps = int(timesteps) - self.linear_start = linear_start - self.linear_end = linear_end - assert ( - alphas_cumprod.shape[0] == self.num_timesteps - ), "alphas have to be defined for each timestep" - - to_torch = partial(torch.tensor, dtype=torch.float32) - - self.register_buffer("betas", to_torch(betas)) - self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) - self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) - self.register_buffer( - "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) - ) - self.register_buffer( - "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) - ) - self.register_buffer( - "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) - ) - self.register_buffer( - "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) - ) - - # calculations for posterior q(x_{t-1} | x_t, x_0) - posterior_variance = (1 - self.v_posterior) * betas * ( - 1.0 - alphas_cumprod_prev - ) / (1.0 - alphas_cumprod) + self.v_posterior * betas - # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) - self.register_buffer("posterior_variance", to_torch(posterior_variance)) - # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain - self.register_buffer( - "posterior_log_variance_clipped", - to_torch(np.log(np.maximum(posterior_variance, 1e-20))), - ) - self.register_buffer( - "posterior_mean_coef1", - to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), - ) - self.register_buffer( - "posterior_mean_coef2", - to_torch( - (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) - ), - ) - - if self.parameterization == "eps": - lvlb_weights = self.betas**2 / ( - 2 - * self.posterior_variance - * to_torch(alphas) - * (1 - self.alphas_cumprod) - ) - elif self.parameterization == "x0": - lvlb_weights = ( - 0.5 - * np.sqrt(torch.Tensor(alphas_cumprod)) - / (2.0 * 1 - torch.Tensor(alphas_cumprod)) - ) - else: - raise NotImplementedError("mu not supported") - # TODO how to choose this term - lvlb_weights[0] = lvlb_weights[1] - self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) - assert not torch.isnan(self.lvlb_weights).all() - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.model.parameters()) - self.model_ema.copy_to(self.model) - if context is not None: - # print(f"{context}: Switched to EMA weights") - pass - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.model.parameters()) - if context is not None: - # print(f"{context}: Restored training weights") - pass - - def q_mean_variance(self, x_start, t): - """ - Get the distribution q(x_t | x_0). - :param x_start: the [N x C x ...] tensor of noiseless inputs. - :param t: the number of diffusion steps (minus 1). Here, 0 means one step. - :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
- """ - mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start - variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) - log_variance = extract_into_tensor( - self.log_one_minus_alphas_cumprod, t, x_start.shape - ) - return mean, variance, log_variance - - def predict_start_from_noise(self, x_t, t, noise): - return ( - extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) - * noise - ) - - def q_posterior(self, x_start, x_t, t): - posterior_mean = ( - extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start - + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t - ) - posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) - posterior_log_variance_clipped = extract_into_tensor( - self.posterior_log_variance_clipped, t, x_t.shape - ) - return posterior_mean, posterior_variance, posterior_log_variance_clipped - - def p_mean_variance(self, x, t, clip_denoised: bool): - model_out = self.model(x, t) - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - if clip_denoised: - x_recon.clamp_(-1.0, 1.0) - - model_mean, posterior_variance, posterior_log_variance = self.q_posterior( - x_start=x_recon, x_t=x, t=t - ) - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): - b, *_, device = *x.shape, x.device - model_mean, _, model_log_variance = self.p_mean_variance( - x=x, t=t, clip_denoised=clip_denoised - ) - noise = noise_like(x.shape, device, repeat_noise) - # no noise when t == 0 - nonzero_mask = ( - (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))).contiguous() - ) - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def p_sample_loop(self, shape, return_intermediates=False): - device = self.betas.device - b = shape[0] - img = torch.randn(shape, device=device) - intermediates = [img] - for i in tqdm( - reversed(range(0, self.num_timesteps)), - desc="Sampling t", - total=self.num_timesteps, - ): - img = self.p_sample( - img, - torch.full((b,), i, device=device, dtype=torch.long), - clip_denoised=self.clip_denoised, - ) - if i % self.log_every_t == 0 or i == self.num_timesteps - 1: - intermediates.append(img) - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, batch_size=16, return_intermediates=False): - shape = (batch_size, channels, self.latent_t_size, self.latent_f_size) - channels = self.channels - return self.p_sample_loop(shape, return_intermediates=return_intermediates) - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - return ( - extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start - + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) - * noise - ) - - def forward(self, x, *args, **kwargs): - t = torch.randint( - 0, self.num_timesteps, (x.shape[0],), device=self.device - ).long() - return self.p_losses(x, t, *args, **kwargs) - - def get_input(self, batch, k): - # fbank, log_magnitudes_stft, label_indices, fname, waveform, clip_label, text = batch - fbank, log_magnitudes_stft, label_indices, fname, waveform, text = batch - ret = {} - - ret["fbank"] = ( - 
fbank.unsqueeze(1).to(memory_format=torch.contiguous_format).float() - ) - ret["stft"] = log_magnitudes_stft.to( - memory_format=torch.contiguous_format - ).float() - # ret["clip_label"] = clip_label.to(memory_format=torch.contiguous_format).float() - ret["waveform"] = waveform.to(memory_format=torch.contiguous_format).float() - ret["text"] = list(text) - ret["fname"] = fname - - return ret[k] diff --git a/spaces/diacanFperku/AutoGPT/CRACK Canvas X 2019 19.0.319.0 Medicine[BabuPC] FREE.md b/spaces/diacanFperku/AutoGPT/CRACK Canvas X 2019 19.0.319.0 Medicine[BabuPC] FREE.md deleted file mode 100644 index 17eab1d04c5684ff29011215ff42e4caac1e642a..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/CRACK Canvas X 2019 19.0.319.0 Medicine[BabuPC] FREE.md +++ /dev/null @@ -1,10 +0,0 @@ -
        - - - \ No newline at end of file diff --git a/spaces/nyanko7/sd-diffusers-webui/Dockerfile b/spaces/nyanko7/sd-diffusers-webui/Dockerfile deleted file mode 100644 index bd9a64b22a69533d1dc3c9fd1e72b6b328b8e498..0000000000000000000000000000000000000000 --- a/spaces/nyanko7/sd-diffusers-webui/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# Dockerfile Public T4 - -FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-devel -ENV DEBIAN_FRONTEND noninteractive - -WORKDIR /content -RUN pip install numexpr einops transformers k_diffusion safetensors gradio diffusers==0.20 xformers - -ADD . . -RUN adduser --disabled-password --gecos '' user -RUN chown -R user:user /content -RUN chmod -R 777 /content -USER user - -EXPOSE 7860 -CMD python /content/app.py diff --git a/spaces/onemriganka/hello_space/app.py b/spaces/onemriganka/hello_space/app.py deleted file mode 100644 index dba1290efb5bb9fd70a987a7e98f9a2372f4c9da..0000000000000000000000000000000000000000 --- a/spaces/onemriganka/hello_space/app.py +++ /dev/null @@ -1,9 +0,0 @@ -import streamlit as st -from transformers import -pipe = pipeline("sentiment analysis") -text = st.textarea("enter some text") - - -if text: - out = pipe(text) - st.json(out) \ No newline at end of file diff --git a/spaces/oyl344531959/White-box-Cartoonization/wbc/network.py b/spaces/oyl344531959/White-box-Cartoonization/wbc/network.py deleted file mode 100644 index 6f16cee1aa1994d0a78c524f459764de5164e637..0000000000000000000000000000000000000000 --- a/spaces/oyl344531959/White-box-Cartoonization/wbc/network.py +++ /dev/null @@ -1,62 +0,0 @@ -import tensorflow as tf -import numpy as np -import tensorflow.contrib.slim as slim - - - -def resblock(inputs, out_channel=32, name='resblock'): - - with tf.variable_scope(name): - - x = slim.convolution2d(inputs, out_channel, [3, 3], - activation_fn=None, scope='conv1') - x = tf.nn.leaky_relu(x) - x = slim.convolution2d(x, out_channel, [3, 3], - activation_fn=None, scope='conv2') - - return x + inputs - - - - -def unet_generator(inputs, channel=32, num_blocks=4, name='generator', reuse=False): - with tf.variable_scope(name, reuse=reuse): - - x0 = slim.convolution2d(inputs, channel, [7, 7], activation_fn=None) - x0 = tf.nn.leaky_relu(x0) - - x1 = slim.convolution2d(x0, channel, [3, 3], stride=2, activation_fn=None) - x1 = tf.nn.leaky_relu(x1) - x1 = slim.convolution2d(x1, channel*2, [3, 3], activation_fn=None) - x1 = tf.nn.leaky_relu(x1) - - x2 = slim.convolution2d(x1, channel*2, [3, 3], stride=2, activation_fn=None) - x2 = tf.nn.leaky_relu(x2) - x2 = slim.convolution2d(x2, channel*4, [3, 3], activation_fn=None) - x2 = tf.nn.leaky_relu(x2) - - for idx in range(num_blocks): - x2 = resblock(x2, out_channel=channel*4, name='block_{}'.format(idx)) - - x2 = slim.convolution2d(x2, channel*2, [3, 3], activation_fn=None) - x2 = tf.nn.leaky_relu(x2) - - h1, w1 = tf.shape(x2)[1], tf.shape(x2)[2] - x3 = tf.image.resize_bilinear(x2, (h1*2, w1*2)) - x3 = slim.convolution2d(x3+x1, channel*2, [3, 3], activation_fn=None) - x3 = tf.nn.leaky_relu(x3) - x3 = slim.convolution2d(x3, channel, [3, 3], activation_fn=None) - x3 = tf.nn.leaky_relu(x3) - - h2, w2 = tf.shape(x3)[1], tf.shape(x3)[2] - x4 = tf.image.resize_bilinear(x3, (h2*2, w2*2)) - x4 = slim.convolution2d(x4+x0, channel, [3, 3], activation_fn=None) - x4 = tf.nn.leaky_relu(x4) - x4 = slim.convolution2d(x4, 3, [7, 7], activation_fn=None) - - return x4 - -if __name__ == '__main__': - - - pass \ No newline at end of file diff --git a/spaces/patrickvonplaten/convert/app.py 
b/spaces/patrickvonplaten/convert/app.py deleted file mode 100644 index 7209604199555b0b9cecb7bab30721f085fefdfe..0000000000000000000000000000000000000000 --- a/spaces/patrickvonplaten/convert/app.py +++ /dev/null @@ -1,94 +0,0 @@ -import csv -from datetime import datetime -import os -from typing import Optional -import gradio as gr - -from convert import convert -from huggingface_hub import HfApi, Repository - - -DATASET_REPO_URL = "https://huggingface.co/datasets/safetensors/conversions" -DATA_FILENAME = "data.csv" -DATA_FILE = os.path.join("data", DATA_FILENAME) - -HF_TOKEN = os.environ.get("HF_TOKEN") - -repo: Optional[Repository] = None -if HF_TOKEN: - repo = Repository(local_dir="data", clone_from=DATASET_REPO_URL, token=HF_TOKEN) - - -def run(token: str, model_id: str) -> str: - if token == "" or model_id == "": - return """ - ### Invalid input 🐞 - - Please fill a token and model_id. - """ - try: - api = HfApi(token=token) - is_private = api.model_info(repo_id=model_id).private - print("is_private", is_private) - - commit_info = convert(api=api, model_id=model_id) - print("[commit_info]", commit_info) - - # save in a (public) dataset: - if repo is not None and not is_private: - repo.git_pull(rebase=True) - print("pulled") - with open(DATA_FILE, "a") as csvfile: - writer = csv.DictWriter( - csvfile, fieldnames=["model_id", "pr_url", "time"] - ) - writer.writerow( - { - "model_id": model_id, - "pr_url": commit_info.pr_url, - "time": str(datetime.now()), - } - ) - commit_url = repo.push_to_hub() - print("[dataset]", commit_url) - - return f""" - ### Success 🔥 - - Yay! This model was successfully converted and a PR was open using your token, here: - - [{commit_info.pr_url}]({commit_info.pr_url}) - """ - except Exception as e: - return f""" - ### Error 😢😢😢 - - {e} - """ - - -DESCRIPTION = """ -The steps are the following: - -- Paste a read-access token from hf.co/settings/tokens. Read access is enough given that we will open a PR against the source repo. -- Input a model id from the Hub -- Click "Submit" -- That's it! You'll get feedback if it works or not, and if it worked, you'll get the URL of the opened PR 🔥 - -⚠️ For now only `pytorch_model.bin` files are supported but we'll extend in the future. -""" - -demo = gr.Interface( - title="Convert any model to Safetensors and open a PR", - description=DESCRIPTION, - allow_flagging="never", - article="Check out the [Safetensors repo on GitHub](https://github.com/huggingface/safetensors)", - inputs=[ - gr.Text(max_lines=1, label="your_hf_token"), - gr.Text(max_lines=1, label="model_id"), - ], - outputs=[gr.Markdown(label="output")], - fn=run, -) - -demo.launch() diff --git a/spaces/paulbricman/velma/tests/test_baselines.py b/spaces/paulbricman/velma/tests/test_baselines.py deleted file mode 100644 index e612cd7aa17544cc39c5e6b92ff32301fd72fc67..0000000000000000000000000000000000000000 --- a/spaces/paulbricman/velma/tests/test_baselines.py +++ /dev/null @@ -1,31 +0,0 @@ -from src.baselines import infer_nli, infer_embs -from sentence_transformers import CrossEncoder, SentenceTransformer - - -def test_nli_baseline(): - nli = CrossEncoder('cross-encoder/nli-deberta-v3-base') - - context = 'Social media is awesome because it allows you to connect with others.' - statements = [' Facebook is amazing.', - ' Facebook is awful.'] - - probs = infer_nli(context, statements, nli) - assert probs[0] > probs[1], statements - - context = 'Being vegan is the way to go. Let\'s save the planet.' 
- statements = [' Eating meat is completely ethical.', - ' Eating meat is not ethical.'] - - probs = infer_nli(context, statements, nli) - assert probs[1] > probs[0], statements - - -def test_embs_baseline(): - encoder = SentenceTransformer('all-MiniLM-L6-v2') - - context = 'Social media is awesome because it allows you to connect with others.' - statements = [' Facebook is amazing.', - ' Facebook is awful.'] - - probs = infer_embs(context, statements, encoder) - assert probs[0] > probs[1], statements diff --git a/spaces/perilli/tortoise-tts-v2/tortoise/__init__.py b/spaces/perilli/tortoise-tts-v2/tortoise/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/peteralexandercharles/runwayml-stable-diffusion-v1-5/app.py b/spaces/peteralexandercharles/runwayml-stable-diffusion-v1-5/app.py deleted file mode 100644 index a82df332731f067826d3e1ef79fabceffb74d07e..0000000000000000000000000000000000000000 --- a/spaces/peteralexandercharles/runwayml-stable-diffusion-v1-5/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/runwayml/stable-diffusion-v1-5").launch() \ No newline at end of file diff --git a/spaces/planet10/semantic-search/README.md b/spaces/planet10/semantic-search/README.md deleted file mode 100644 index 6e5588c49f5c80bfdbe0845947a241357ae5bb92..0000000000000000000000000000000000000000 --- a/spaces/planet10/semantic-search/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Semantic Search -emoji: 🌍 -colorFrom: blue -colorTo: green -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/portal/Top-20/back.html b/spaces/portal/Top-20/back.html deleted file mode 100644 index b0037317cdf8264d6687f87ef46e809b189b83af..0000000000000000000000000000000000000000 --- a/spaces/portal/Top-20/back.html +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - -
        - -
        - - - \ No newline at end of file diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Empty-937365d8.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Empty-937365d8.js deleted file mode 100644 index cf611752c24763c1a35976a7bac3ffa1cb983422..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Empty-937365d8.js +++ /dev/null @@ -1,2 +0,0 @@ -import"./Button-89057c03.js";const{SvelteComponent:h,append:b,attr:r,binding_callbacks:v,create_slot:z,detach:E,element:g,get_all_dirty_from_scope:C,get_slot_changes:w,init:B,insert:R,safe_not_equal:k,toggle_class:_,transition_in:q,transition_out:S,update_slot_base:j}=window.__gradio__svelte__internal;function y(n){let e,o,i;const u=n[5].default,l=z(u,n,n[4],null);return{c(){e=g("div"),o=g("div"),l&&l.c(),r(o,"class","icon svelte-1oiin9d"),r(e,"class","empty svelte-1oiin9d"),r(e,"aria-label","Empty value"),_(e,"small",n[0]==="small"),_(e,"large",n[0]==="large"),_(e,"unpadded_box",n[1]),_(e,"small_parent",n[3])},m(t,s){R(t,e,s),b(e,o),l&&l.m(o,null),n[6](e),i=!0},p(t,[s]){l&&l.p&&(!i||s&16)&&j(l,u,t,t[4],i?w(u,t[4],s,null):C(t[4]),null),(!i||s&1)&&_(e,"small",t[0]==="small"),(!i||s&1)&&_(e,"large",t[0]==="large"),(!i||s&2)&&_(e,"unpadded_box",t[1]),(!i||s&8)&&_(e,"small_parent",t[3])},i(t){i||(q(l,t),i=!0)},o(t){S(l,t),i=!1},d(t){t&&E(e),l&&l.d(t),n[6](null)}}}function A(n,e,o){let i,{$$slots:u={},$$scope:l}=e,{size:t="small"}=e,{unpadded_box:s=!1}=e,d;function m(a){if(!a)return!1;const{height:f}=a.getBoundingClientRect(),{height:c}=a.parentElement?.getBoundingClientRect()||{height:f};return f>c+2}function p(a){v[a?"unshift":"push"](()=>{d=a,o(2,d)})}return n.$$set=a=>{"size"in a&&o(0,t=a.size),"unpadded_box"in a&&o(1,s=a.unpadded_box),"$$scope"in a&&o(4,l=a.$$scope)},n.$$.update=()=>{n.$$.dirty&4&&o(3,i=m(d))},[t,s,d,i,l,u,p]}class F extends h{constructor(e){super(),B(this,e,A,y,k,{size:0,unpadded_box:1})}}export{F as E}; -//# sourceMappingURL=Empty-937365d8.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Example-23981437.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Example-23981437.js deleted file mode 100644 index 7300bb87fde068151121de7f15f874d341b5f5a5..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Example-23981437.js +++ /dev/null @@ -1,2 +0,0 @@ -import{M as o}from"./Example.svelte_svelte_type_style_lang-f75a701b.js";import"./Index-c74a8b7c.js";import"./index-50ad4c77.js";import"./svelte/svelte.js";const{SvelteComponent:u,attr:c,create_component:d,destroy_component:g,detach:b,element:h,init:k,insert:w,mount_component:z,safe_not_equal:v,toggle_class:m,transition_in:y,transition_out:q}=window.__gradio__svelte__internal;function C(a){let e,l,s;return l=new o({props:{message:a[0],latex_delimiters:a[5],sanitize_html:a[3],line_breaks:a[4],chatbot:!1}}),{c(){e=h("div"),d(l.$$.fragment),c(e,"class","prose svelte-1ayixqk"),m(e,"table",a[1]==="table"),m(e,"gallery",a[1]==="gallery"),m(e,"selected",a[2])},m(t,i){w(t,e,i),z(l,e,null),s=!0},p(t,[i]){const 
_={};i&1&&(_.message=t[0]),i&32&&(_.latex_delimiters=t[5]),i&8&&(_.sanitize_html=t[3]),i&16&&(_.line_breaks=t[4]),l.$set(_),(!s||i&2)&&m(e,"table",t[1]==="table"),(!s||i&2)&&m(e,"gallery",t[1]==="gallery"),(!s||i&4)&&m(e,"selected",t[2])},i(t){s||(y(l.$$.fragment,t),s=!0)},o(t){q(l.$$.fragment,t),s=!1},d(t){t&&b(e),g(l)}}}function M(a,e,l){let{value:s}=e,{type:t}=e,{selected:i=!1}=e,{sanitize_html:_}=e,{line_breaks:r}=e,{latex_delimiters:f}=e;return a.$$set=n=>{"value"in n&&l(0,s=n.value),"type"in n&&l(1,t=n.type),"selected"in n&&l(2,i=n.selected),"sanitize_html"in n&&l(3,_=n.sanitize_html),"line_breaks"in n&&l(4,r=n.line_breaks),"latex_delimiters"in n&&l(5,f=n.latex_delimiters)},[s,t,i,_,r,f]}class B extends u{constructor(e){super(),k(this,e,M,C,v,{value:0,type:1,selected:2,sanitize_html:3,line_breaks:4,latex_delimiters:5})}}export{B as default}; -//# sourceMappingURL=Example-23981437.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_arrow_patches.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_arrow_patches.py deleted file mode 100644 index 8d573b4adb1b9c1c24be66e5922b0a7bedec9c63..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/tests/test_arrow_patches.py +++ /dev/null @@ -1,177 +0,0 @@ -import pytest -import platform -import matplotlib.pyplot as plt -from matplotlib.testing.decorators import image_comparison -import matplotlib.patches as mpatches - - -def draw_arrow(ax, t, r): - ax.annotate('', xy=(0.5, 0.5 + r), xytext=(0.5, 0.5), size=30, - arrowprops=dict(arrowstyle=t, - fc="b", ec='k')) - - -@image_comparison(['fancyarrow_test_image']) -def test_fancyarrow(): - # Added 0 to test division by zero error described in issue 3930 - r = [0.4, 0.3, 0.2, 0.1, 0] - t = ["fancy", "simple", mpatches.ArrowStyle.Fancy()] - - fig, axs = plt.subplots(len(t), len(r), squeeze=False, - figsize=(8, 4.5), subplot_kw=dict(aspect=1)) - - for i_r, r1 in enumerate(r): - for i_t, t1 in enumerate(t): - ax = axs[i_t, i_r] - draw_arrow(ax, t1, r1) - ax.tick_params(labelleft=False, labelbottom=False) - - -@image_comparison(['boxarrow_test_image.png']) -def test_boxarrow(): - - styles = mpatches.BoxStyle.get_styles() - - n = len(styles) - spacing = 1.2 - - figheight = (n * spacing + .5) - fig = plt.figure(figsize=(4 / 1.5, figheight / 1.5)) - - fontsize = 0.3 * 72 - - for i, stylename in enumerate(sorted(styles)): - fig.text(0.5, ((n - i) * spacing - 0.5)/figheight, stylename, - ha="center", - size=fontsize, - transform=fig.transFigure, - bbox=dict(boxstyle=stylename, fc="w", ec="k")) - - -def __prepare_fancyarrow_dpi_cor_test(): - """ - Convenience function that prepares and returns a FancyArrowPatch. It aims - at being used to test that the size of the arrow head does not depend on - the DPI value of the exported picture. - - NB: this function *is not* a test in itself! - """ - fig2 = plt.figure("fancyarrow_dpi_cor_test", figsize=(4, 3), dpi=50) - ax = fig2.add_subplot() - ax.set_xlim([0, 1]) - ax.set_ylim([0, 1]) - ax.add_patch(mpatches.FancyArrowPatch(posA=(0.3, 0.4), posB=(0.8, 0.6), - lw=3, arrowstyle='->', - mutation_scale=100)) - return fig2 - - -@image_comparison(['fancyarrow_dpi_cor_100dpi.png'], remove_text=True, - tol=0 if platform.machine() == 'x86_64' else 0.02, - savefig_kwarg=dict(dpi=100)) -def test_fancyarrow_dpi_cor_100dpi(): - """ - Check the export of a FancyArrowPatch @ 100 DPI. 
FancyArrowPatch is - instantiated through a dedicated function because another similar test - checks a similar export but with a different DPI value. - - Remark: test only a rasterized format. - """ - - __prepare_fancyarrow_dpi_cor_test() - - -@image_comparison(['fancyarrow_dpi_cor_200dpi.png'], remove_text=True, - tol=0 if platform.machine() == 'x86_64' else 0.02, - savefig_kwarg=dict(dpi=200)) -def test_fancyarrow_dpi_cor_200dpi(): - """ - As test_fancyarrow_dpi_cor_100dpi, but exports @ 200 DPI. The relative size - of the arrow head should be the same. - """ - - __prepare_fancyarrow_dpi_cor_test() - - -@image_comparison(['fancyarrow_dash.png'], remove_text=True, style='default') -def test_fancyarrow_dash(): - fig, ax = plt.subplots() - e = mpatches.FancyArrowPatch((0, 0), (0.5, 0.5), - arrowstyle='-|>', - connectionstyle='angle3,angleA=0,angleB=90', - mutation_scale=10.0, - linewidth=2, - linestyle='dashed', - color='k') - e2 = mpatches.FancyArrowPatch((0, 0), (0.5, 0.5), - arrowstyle='-|>', - connectionstyle='angle3', - mutation_scale=10.0, - linewidth=2, - linestyle='dotted', - color='k') - ax.add_patch(e) - ax.add_patch(e2) - - -@image_comparison(['arrow_styles.png'], style='mpl20', remove_text=True, - tol=0 if platform.machine() == 'x86_64' else 0.005) -def test_arrow_styles(): - styles = mpatches.ArrowStyle.get_styles() - - n = len(styles) - fig, ax = plt.subplots(figsize=(8, 8)) - ax.set_xlim(0, 1) - ax.set_ylim(-1, n) - fig.subplots_adjust(left=0, right=1, bottom=0, top=1) - - for i, stylename in enumerate(sorted(styles)): - patch = mpatches.FancyArrowPatch((0.1 + (i % 2)*0.05, i), - (0.45 + (i % 2)*0.05, i), - arrowstyle=stylename, - mutation_scale=25) - ax.add_patch(patch) - - for i, stylename in enumerate([']-[', ']-', '-[', '|-|']): - style = stylename - if stylename[0] != '-': - style += ',angleA=ANGLE' - if stylename[-1] != '-': - style += ',angleB=ANGLE' - - for j, angle in enumerate([-30, 60]): - arrowstyle = style.replace('ANGLE', str(angle)) - patch = mpatches.FancyArrowPatch((0.55, 2*i + j), (0.9, 2*i + j), - arrowstyle=arrowstyle, - mutation_scale=25) - ax.add_patch(patch) - - -@image_comparison(['connection_styles.png'], style='mpl20', remove_text=True) -def test_connection_styles(): - styles = mpatches.ConnectionStyle.get_styles() - - n = len(styles) - fig, ax = plt.subplots(figsize=(6, 10)) - ax.set_xlim(0, 1) - ax.set_ylim(-1, n) - - for i, stylename in enumerate(sorted(styles)): - patch = mpatches.FancyArrowPatch((0.1, i), (0.8, i + 0.5), - arrowstyle="->", - connectionstyle=stylename, - mutation_scale=25) - ax.add_patch(patch) - - -def test_invalid_intersection(): - conn_style_1 = mpatches.ConnectionStyle.Angle3(angleA=20, angleB=200) - p1 = mpatches.FancyArrowPatch((.2, .2), (.5, .5), - connectionstyle=conn_style_1) - with pytest.raises(ValueError): - plt.gca().add_patch(p1) - - conn_style_2 = mpatches.ConnectionStyle.Angle3(angleA=20, angleB=199.9) - p2 = mpatches.FancyArrowPatch((.2, .2), (.5, .5), - connectionstyle=conn_style_2) - plt.gca().add_patch(p2) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/_libs/window/__init__.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/_libs/window/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/plotting/_matplotlib/converter.py 
b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/plotting/_matplotlib/converter.py deleted file mode 100644 index be0ded0ecdf57272eb576a14fb0373af6d723853..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/plotting/_matplotlib/converter.py +++ /dev/null @@ -1,1139 +0,0 @@ -from __future__ import annotations - -import contextlib -import datetime as pydt -from datetime import ( - datetime, - timedelta, - tzinfo, -) -import functools -from typing import ( - TYPE_CHECKING, - Any, - Final, - cast, -) -import warnings - -import matplotlib.dates as mdates -from matplotlib.ticker import ( - AutoLocator, - Formatter, - Locator, -) -from matplotlib.transforms import nonsingular -import matplotlib.units as munits -import numpy as np - -from pandas._libs import lib -from pandas._libs.tslibs import ( - Timestamp, - to_offset, -) -from pandas._libs.tslibs.dtypes import FreqGroup -from pandas._typing import F - -from pandas.core.dtypes.common import ( - is_float, - is_float_dtype, - is_integer, - is_integer_dtype, - is_nested_list_like, -) - -from pandas import ( - Index, - Series, - get_option, -) -import pandas.core.common as com -from pandas.core.indexes.datetimes import date_range -from pandas.core.indexes.period import ( - Period, - PeriodIndex, - period_range, -) -import pandas.core.tools.datetimes as tools - -if TYPE_CHECKING: - from collections.abc import Generator - - from pandas._libs.tslibs.offsets import BaseOffset - -# constants -HOURS_PER_DAY: Final = 24.0 -MIN_PER_HOUR: Final = 60.0 -SEC_PER_MIN: Final = 60.0 - -SEC_PER_HOUR: Final = SEC_PER_MIN * MIN_PER_HOUR -SEC_PER_DAY: Final = SEC_PER_HOUR * HOURS_PER_DAY - -MUSEC_PER_DAY: Final = 10**6 * SEC_PER_DAY - -_mpl_units = {} # Cache for units overwritten by us - - -def get_pairs(): - pairs = [ - (Timestamp, DatetimeConverter), - (Period, PeriodConverter), - (pydt.datetime, DatetimeConverter), - (pydt.date, DatetimeConverter), - (pydt.time, TimeConverter), - (np.datetime64, DatetimeConverter), - ] - return pairs - - -def register_pandas_matplotlib_converters(func: F) -> F: - """ - Decorator applying pandas_converters. - """ - - @functools.wraps(func) - def wrapper(*args, **kwargs): - with pandas_converters(): - return func(*args, **kwargs) - - return cast(F, wrapper) - - -@contextlib.contextmanager -def pandas_converters() -> Generator[None, None, None]: - """ - Context manager registering pandas' converters for a plot. - - See Also - -------- - register_pandas_matplotlib_converters : Decorator that applies this. 
- """ - value = get_option("plotting.matplotlib.register_converters") - - if value: - # register for True or "auto" - register() - try: - yield - finally: - if value == "auto": - # only deregister for "auto" - deregister() - - -def register() -> None: - pairs = get_pairs() - for type_, cls in pairs: - # Cache previous converter if present - if type_ in munits.registry and not isinstance(munits.registry[type_], cls): - previous = munits.registry[type_] - _mpl_units[type_] = previous - # Replace with pandas converter - munits.registry[type_] = cls() - - -def deregister() -> None: - # Renamed in pandas.plotting.__init__ - for type_, cls in get_pairs(): - # We use type to catch our classes directly, no inheritance - if type(munits.registry.get(type_)) is cls: - munits.registry.pop(type_) - - # restore the old keys - for unit, formatter in _mpl_units.items(): - if type(formatter) not in {DatetimeConverter, PeriodConverter, TimeConverter}: - # make it idempotent by excluding ours. - munits.registry[unit] = formatter - - -def _to_ordinalf(tm: pydt.time) -> float: - tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + tm.microsecond / 10**6 - return tot_sec - - -def time2num(d): - if isinstance(d, str): - parsed = Timestamp(d) - return _to_ordinalf(parsed.time()) - if isinstance(d, pydt.time): - return _to_ordinalf(d) - return d - - -class TimeConverter(munits.ConversionInterface): - @staticmethod - def convert(value, unit, axis): - valid_types = (str, pydt.time) - if isinstance(value, valid_types) or is_integer(value) or is_float(value): - return time2num(value) - if isinstance(value, Index): - return value.map(time2num) - if isinstance(value, (list, tuple, np.ndarray, Index)): - return [time2num(x) for x in value] - return value - - @staticmethod - def axisinfo(unit, axis) -> munits.AxisInfo | None: - if unit != "time": - return None - - majloc = AutoLocator() - majfmt = TimeFormatter(majloc) - return munits.AxisInfo(majloc=majloc, majfmt=majfmt, label="time") - - @staticmethod - def default_units(x, axis) -> str: - return "time" - - -# time formatter -class TimeFormatter(Formatter): - def __init__(self, locs) -> None: - self.locs = locs - - def __call__(self, x, pos: int = 0) -> str: - """ - Return the time of day as a formatted string. - - Parameters - ---------- - x : float - The time of day specified as seconds since 00:00 (midnight), - with up to microsecond precision. - pos - Unused - - Returns - ------- - str - A string in HH:MM:SS.mmmuuu format. Microseconds, - milliseconds and seconds are only displayed if non-zero. 
- """ - fmt = "%H:%M:%S.%f" - s = int(x) - msus = round((x - s) * 10**6) - ms = msus // 1000 - us = msus % 1000 - m, s = divmod(s, 60) - h, m = divmod(m, 60) - _, h = divmod(h, 24) - if us != 0: - return pydt.time(h, m, s, msus).strftime(fmt) - elif ms != 0: - return pydt.time(h, m, s, msus).strftime(fmt)[:-3] - elif s != 0: - return pydt.time(h, m, s).strftime("%H:%M:%S") - - return pydt.time(h, m).strftime("%H:%M") - - -# Period Conversion - - -class PeriodConverter(mdates.DateConverter): - @staticmethod - def convert(values, units, axis): - if is_nested_list_like(values): - values = [PeriodConverter._convert_1d(v, units, axis) for v in values] - else: - values = PeriodConverter._convert_1d(values, units, axis) - return values - - @staticmethod - def _convert_1d(values, units, axis): - if not hasattr(axis, "freq"): - raise TypeError("Axis must have `freq` set to convert to Periods") - valid_types = (str, datetime, Period, pydt.date, pydt.time, np.datetime64) - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", "Period with BDay freq is deprecated", category=FutureWarning - ) - warnings.filterwarnings( - "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning - ) - if ( - isinstance(values, valid_types) - or is_integer(values) - or is_float(values) - ): - return get_datevalue(values, axis.freq) - elif isinstance(values, PeriodIndex): - return values.asfreq(axis.freq).asi8 - elif isinstance(values, Index): - return values.map(lambda x: get_datevalue(x, axis.freq)) - elif lib.infer_dtype(values, skipna=False) == "period": - # https://github.com/pandas-dev/pandas/issues/24304 - # convert ndarray[period] -> PeriodIndex - return PeriodIndex(values, freq=axis.freq).asi8 - elif isinstance(values, (list, tuple, np.ndarray, Index)): - return [get_datevalue(x, axis.freq) for x in values] - return values - - -def get_datevalue(date, freq): - if isinstance(date, Period): - return date.asfreq(freq).ordinal - elif isinstance(date, (str, datetime, pydt.date, pydt.time, np.datetime64)): - return Period(date, freq).ordinal - elif ( - is_integer(date) - or is_float(date) - or (isinstance(date, (np.ndarray, Index)) and (date.size == 1)) - ): - return date - elif date is None: - return None - raise ValueError(f"Unrecognizable date '{date}'") - - -# Datetime Conversion -class DatetimeConverter(mdates.DateConverter): - @staticmethod - def convert(values, unit, axis): - # values might be a 1-d array, or a list-like of arrays. - if is_nested_list_like(values): - values = [DatetimeConverter._convert_1d(v, unit, axis) for v in values] - else: - values = DatetimeConverter._convert_1d(values, unit, axis) - return values - - @staticmethod - def _convert_1d(values, unit, axis): - def try_parse(values): - try: - return mdates.date2num(tools.to_datetime(values)) - except Exception: - return values - - if isinstance(values, (datetime, pydt.date, np.datetime64, pydt.time)): - return mdates.date2num(values) - elif is_integer(values) or is_float(values): - return values - elif isinstance(values, str): - return try_parse(values) - elif isinstance(values, (list, tuple, np.ndarray, Index, Series)): - if isinstance(values, Series): - # https://github.com/matplotlib/matplotlib/issues/11391 - # Series was skipped. 
Convert to DatetimeIndex to get asi8 - values = Index(values) - if isinstance(values, Index): - values = values.values - if not isinstance(values, np.ndarray): - values = com.asarray_tuplesafe(values) - - if is_integer_dtype(values) or is_float_dtype(values): - return values - - try: - values = tools.to_datetime(values) - except Exception: - pass - - values = mdates.date2num(values) - - return values - - @staticmethod - def axisinfo(unit: tzinfo | None, axis) -> munits.AxisInfo: - """ - Return the :class:`~matplotlib.units.AxisInfo` for *unit*. - - *unit* is a tzinfo instance or None. - The *axis* argument is required but not used. - """ - tz = unit - - majloc = PandasAutoDateLocator(tz=tz) - majfmt = PandasAutoDateFormatter(majloc, tz=tz) - datemin = pydt.date(2000, 1, 1) - datemax = pydt.date(2010, 1, 1) - - return munits.AxisInfo( - majloc=majloc, majfmt=majfmt, label="", default_limits=(datemin, datemax) - ) - - -class PandasAutoDateFormatter(mdates.AutoDateFormatter): - def __init__(self, locator, tz=None, defaultfmt: str = "%Y-%m-%d") -> None: - mdates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt) - - -class PandasAutoDateLocator(mdates.AutoDateLocator): - def get_locator(self, dmin, dmax): - """Pick the best locator based on a distance.""" - tot_sec = (dmax - dmin).total_seconds() - - if abs(tot_sec) < self.minticks: - self._freq = -1 - locator = MilliSecondLocator(self.tz) - locator.set_axis(self.axis) - - locator.axis.set_view_interval(*self.axis.get_view_interval()) - locator.axis.set_data_interval(*self.axis.get_data_interval()) - return locator - - return mdates.AutoDateLocator.get_locator(self, dmin, dmax) - - def _get_unit(self): - return MilliSecondLocator.get_unit_generic(self._freq) - - -class MilliSecondLocator(mdates.DateLocator): - UNIT = 1.0 / (24 * 3600 * 1000) - - def __init__(self, tz) -> None: - mdates.DateLocator.__init__(self, tz) - self._interval = 1.0 - - def _get_unit(self): - return self.get_unit_generic(-1) - - @staticmethod - def get_unit_generic(freq): - unit = mdates.RRuleLocator.get_unit_generic(freq) - if unit < 0: - return MilliSecondLocator.UNIT - return unit - - def __call__(self): - # if no data have been set, this will tank with a ValueError - try: - dmin, dmax = self.viewlim_to_dt() - except ValueError: - return [] - - # We need to cap at the endpoints of valid datetime - nmax, nmin = mdates.date2num((dmax, dmin)) - - num = (nmax - nmin) * 86400 * 1000 - max_millis_ticks = 6 - for interval in [1, 10, 50, 100, 200, 500]: - if num <= interval * (max_millis_ticks - 1): - self._interval = interval - break - # We went through the whole loop without breaking, default to 1 - self._interval = 1000.0 - - estimate = (nmax - nmin) / (self._get_unit() * self._get_interval()) - - if estimate > self.MAXTICKS * 2: - raise RuntimeError( - "MillisecondLocator estimated to generate " - f"{estimate:d} ticks from {dmin} to {dmax}: exceeds Locator.MAXTICKS" - f"* 2 ({self.MAXTICKS * 2:d}) " - ) - - interval = self._get_interval() - freq = f"{interval}L" - tz = self.tz.tzname(None) - st = dmin.replace(tzinfo=None) - ed = dmin.replace(tzinfo=None) - all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).astype(object) - - try: - if len(all_dates) > 0: - locs = self.raise_if_exceeds(mdates.date2num(all_dates)) - return locs - except Exception: # pragma: no cover - pass - - lims = mdates.date2num([dmin, dmax]) - return lims - - def _get_interval(self): - return self._interval - - def autoscale(self): - """ - Set the view limits to include the data range. 
- """ - # We need to cap at the endpoints of valid datetime - dmin, dmax = self.datalim_to_dt() - - vmin = mdates.date2num(dmin) - vmax = mdates.date2num(dmax) - - return self.nonsingular(vmin, vmax) - - -def _from_ordinal(x, tz: tzinfo | None = None) -> datetime: - ix = int(x) - dt = datetime.fromordinal(ix) - remainder = float(x) - ix - hour, remainder = divmod(24 * remainder, 1) - minute, remainder = divmod(60 * remainder, 1) - second, remainder = divmod(60 * remainder, 1) - microsecond = int(1_000_000 * remainder) - if microsecond < 10: - microsecond = 0 # compensate for rounding errors - dt = datetime( - dt.year, dt.month, dt.day, int(hour), int(minute), int(second), microsecond - ) - if tz is not None: - dt = dt.astimezone(tz) - - if microsecond > 999990: # compensate for rounding errors - dt += timedelta(microseconds=1_000_000 - microsecond) - - return dt - - -# Fixed frequency dynamic tick locators and formatters - -# ------------------------------------------------------------------------- -# --- Locators --- -# ------------------------------------------------------------------------- - - -def _get_default_annual_spacing(nyears) -> tuple[int, int]: - """ - Returns a default spacing between consecutive ticks for annual data. - """ - if nyears < 11: - (min_spacing, maj_spacing) = (1, 1) - elif nyears < 20: - (min_spacing, maj_spacing) = (1, 2) - elif nyears < 50: - (min_spacing, maj_spacing) = (1, 5) - elif nyears < 100: - (min_spacing, maj_spacing) = (5, 10) - elif nyears < 200: - (min_spacing, maj_spacing) = (5, 25) - elif nyears < 600: - (min_spacing, maj_spacing) = (10, 50) - else: - factor = nyears // 1000 + 1 - (min_spacing, maj_spacing) = (factor * 20, factor * 100) - return (min_spacing, maj_spacing) - - -def period_break(dates: PeriodIndex, period: str) -> np.ndarray: - """ - Returns the indices where the given period changes. - - Parameters - ---------- - dates : PeriodIndex - Array of intervals to monitor. - period : str - Name of the period to monitor. - """ - current = getattr(dates, period) - previous = getattr(dates - 1 * dates.freq, period) - return np.nonzero(current - previous)[0] - - -def has_level_label(label_flags: np.ndarray, vmin: float) -> bool: - """ - Returns true if the ``label_flags`` indicate there is at least one label - for this level. - - if the minimum view limit is not an exact integer, then the first tick - label won't be shown, so we must adjust for that. 
- """ - if label_flags.size == 0 or ( - label_flags.size == 1 and label_flags[0] == 0 and vmin % 1 > 0.0 - ): - return False - else: - return True - - -def _daily_finder(vmin, vmax, freq: BaseOffset): - # error: "BaseOffset" has no attribute "_period_dtype_code" - dtype_code = freq._period_dtype_code # type: ignore[attr-defined] - freq_group = FreqGroup.from_period_dtype_code(dtype_code) - - periodsperday = -1 - - if dtype_code >= FreqGroup.FR_HR.value: - if freq_group == FreqGroup.FR_NS: - periodsperday = 24 * 60 * 60 * 1000000000 - elif freq_group == FreqGroup.FR_US: - periodsperday = 24 * 60 * 60 * 1000000 - elif freq_group == FreqGroup.FR_MS: - periodsperday = 24 * 60 * 60 * 1000 - elif freq_group == FreqGroup.FR_SEC: - periodsperday = 24 * 60 * 60 - elif freq_group == FreqGroup.FR_MIN: - periodsperday = 24 * 60 - elif freq_group == FreqGroup.FR_HR: - periodsperday = 24 - else: # pragma: no cover - raise ValueError(f"unexpected frequency: {dtype_code}") - periodsperyear = 365 * periodsperday - periodspermonth = 28 * periodsperday - - elif freq_group == FreqGroup.FR_BUS: - periodsperyear = 261 - periodspermonth = 19 - elif freq_group == FreqGroup.FR_DAY: - periodsperyear = 365 - periodspermonth = 28 - elif freq_group == FreqGroup.FR_WK: - periodsperyear = 52 - periodspermonth = 3 - else: # pragma: no cover - raise ValueError("unexpected frequency") - - # save this for later usage - vmin_orig = vmin - - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", "Period with BDay freq is deprecated", category=FutureWarning - ) - warnings.filterwarnings( - "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning - ) - (vmin, vmax) = ( - Period(ordinal=int(vmin), freq=freq), - Period(ordinal=int(vmax), freq=freq), - ) - assert isinstance(vmin, Period) - assert isinstance(vmax, Period) - span = vmax.ordinal - vmin.ordinal + 1 - - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", "Period with BDay freq is deprecated", category=FutureWarning - ) - warnings.filterwarnings( - "ignore", r"PeriodDtype\[B\] is deprecated", category=FutureWarning - ) - dates_ = period_range(start=vmin, end=vmax, freq=freq) - - # Initialize the output - info = np.zeros( - span, dtype=[("val", np.int64), ("maj", bool), ("min", bool), ("fmt", "|S20")] - ) - info["val"][:] = dates_.asi8 - info["fmt"][:] = "" - info["maj"][[0, -1]] = True - # .. and set some shortcuts - info_maj = info["maj"] - info_min = info["min"] - info_fmt = info["fmt"] - - def first_label(label_flags): - if (label_flags[0] == 0) and (label_flags.size > 1) and ((vmin_orig % 1) > 0.0): - return label_flags[1] - else: - return label_flags[0] - - # Case 1. 
Less than a month - if span <= periodspermonth: - day_start = period_break(dates_, "day") - month_start = period_break(dates_, "month") - - def _hour_finder(label_interval, force_year_start) -> None: - _hour = dates_.hour - _prev_hour = (dates_ - 1 * dates_.freq).hour - hour_start = (_hour - _prev_hour) != 0 - info_maj[day_start] = True - info_min[hour_start & (_hour % label_interval == 0)] = True - year_start = period_break(dates_, "year") - info_fmt[hour_start & (_hour % label_interval == 0)] = "%H:%M" - info_fmt[day_start] = "%H:%M\n%d-%b" - info_fmt[year_start] = "%H:%M\n%d-%b\n%Y" - if force_year_start and not has_level_label(year_start, vmin_orig): - info_fmt[first_label(day_start)] = "%H:%M\n%d-%b\n%Y" - - def _minute_finder(label_interval) -> None: - hour_start = period_break(dates_, "hour") - _minute = dates_.minute - _prev_minute = (dates_ - 1 * dates_.freq).minute - minute_start = (_minute - _prev_minute) != 0 - info_maj[hour_start] = True - info_min[minute_start & (_minute % label_interval == 0)] = True - year_start = period_break(dates_, "year") - info_fmt = info["fmt"] - info_fmt[minute_start & (_minute % label_interval == 0)] = "%H:%M" - info_fmt[day_start] = "%H:%M\n%d-%b" - info_fmt[year_start] = "%H:%M\n%d-%b\n%Y" - - def _second_finder(label_interval) -> None: - minute_start = period_break(dates_, "minute") - _second = dates_.second - _prev_second = (dates_ - 1 * dates_.freq).second - second_start = (_second - _prev_second) != 0 - info["maj"][minute_start] = True - info["min"][second_start & (_second % label_interval == 0)] = True - year_start = period_break(dates_, "year") - info_fmt = info["fmt"] - info_fmt[second_start & (_second % label_interval == 0)] = "%H:%M:%S" - info_fmt[day_start] = "%H:%M:%S\n%d-%b" - info_fmt[year_start] = "%H:%M:%S\n%d-%b\n%Y" - - if span < periodsperday / 12000: - _second_finder(1) - elif span < periodsperday / 6000: - _second_finder(2) - elif span < periodsperday / 2400: - _second_finder(5) - elif span < periodsperday / 1200: - _second_finder(10) - elif span < periodsperday / 800: - _second_finder(15) - elif span < periodsperday / 400: - _second_finder(30) - elif span < periodsperday / 150: - _minute_finder(1) - elif span < periodsperday / 70: - _minute_finder(2) - elif span < periodsperday / 24: - _minute_finder(5) - elif span < periodsperday / 12: - _minute_finder(15) - elif span < periodsperday / 6: - _minute_finder(30) - elif span < periodsperday / 2.5: - _hour_finder(1, False) - elif span < periodsperday / 1.5: - _hour_finder(2, False) - elif span < periodsperday * 1.25: - _hour_finder(3, False) - elif span < periodsperday * 2.5: - _hour_finder(6, True) - elif span < periodsperday * 4: - _hour_finder(12, True) - else: - info_maj[month_start] = True - info_min[day_start] = True - year_start = period_break(dates_, "year") - info_fmt = info["fmt"] - info_fmt[day_start] = "%d" - info_fmt[month_start] = "%d\n%b" - info_fmt[year_start] = "%d\n%b\n%Y" - if not has_level_label(year_start, vmin_orig): - if not has_level_label(month_start, vmin_orig): - info_fmt[first_label(day_start)] = "%d\n%b\n%Y" - else: - info_fmt[first_label(month_start)] = "%d\n%b\n%Y" - - # Case 2. 
Less than three months - elif span <= periodsperyear // 4: - month_start = period_break(dates_, "month") - info_maj[month_start] = True - if dtype_code < FreqGroup.FR_HR.value: - info["min"] = True - else: - day_start = period_break(dates_, "day") - info["min"][day_start] = True - week_start = period_break(dates_, "week") - year_start = period_break(dates_, "year") - info_fmt[week_start] = "%d" - info_fmt[month_start] = "\n\n%b" - info_fmt[year_start] = "\n\n%b\n%Y" - if not has_level_label(year_start, vmin_orig): - if not has_level_label(month_start, vmin_orig): - info_fmt[first_label(week_start)] = "\n\n%b\n%Y" - else: - info_fmt[first_label(month_start)] = "\n\n%b\n%Y" - # Case 3. Less than 14 months ............... - elif span <= 1.15 * periodsperyear: - year_start = period_break(dates_, "year") - month_start = period_break(dates_, "month") - week_start = period_break(dates_, "week") - info_maj[month_start] = True - info_min[week_start] = True - info_min[year_start] = False - info_min[month_start] = False - info_fmt[month_start] = "%b" - info_fmt[year_start] = "%b\n%Y" - if not has_level_label(year_start, vmin_orig): - info_fmt[first_label(month_start)] = "%b\n%Y" - # Case 4. Less than 2.5 years ............... - elif span <= 2.5 * periodsperyear: - year_start = period_break(dates_, "year") - quarter_start = period_break(dates_, "quarter") - month_start = period_break(dates_, "month") - info_maj[quarter_start] = True - info_min[month_start] = True - info_fmt[quarter_start] = "%b" - info_fmt[year_start] = "%b\n%Y" - # Case 4. Less than 4 years ................. - elif span <= 4 * periodsperyear: - year_start = period_break(dates_, "year") - month_start = period_break(dates_, "month") - info_maj[year_start] = True - info_min[month_start] = True - info_min[year_start] = False - - month_break = dates_[month_start].month - jan_or_jul = month_start[(month_break == 1) | (month_break == 7)] - info_fmt[jan_or_jul] = "%b" - info_fmt[year_start] = "%b\n%Y" - # Case 5. Less than 11 years ................ - elif span <= 11 * periodsperyear: - year_start = period_break(dates_, "year") - quarter_start = period_break(dates_, "quarter") - info_maj[year_start] = True - info_min[quarter_start] = True - info_min[year_start] = False - info_fmt[year_start] = "%Y" - # Case 6. More than 12 years ................ 
- else: - year_start = period_break(dates_, "year") - year_break = dates_[year_start].year - nyears = span / periodsperyear - (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) - major_idx = year_start[(year_break % maj_anndef == 0)] - info_maj[major_idx] = True - minor_idx = year_start[(year_break % min_anndef == 0)] - info_min[minor_idx] = True - info_fmt[major_idx] = "%Y" - - return info - - -def _monthly_finder(vmin, vmax, freq): - periodsperyear = 12 - - vmin_orig = vmin - (vmin, vmax) = (int(vmin), int(vmax)) - span = vmax - vmin + 1 - - # Initialize the output - info = np.zeros( - span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")] - ) - info["val"] = np.arange(vmin, vmax + 1) - dates_ = info["val"] - info["fmt"] = "" - year_start = (dates_ % 12 == 0).nonzero()[0] - info_maj = info["maj"] - info_fmt = info["fmt"] - - if span <= 1.15 * periodsperyear: - info_maj[year_start] = True - info["min"] = True - - info_fmt[:] = "%b" - info_fmt[year_start] = "%b\n%Y" - - if not has_level_label(year_start, vmin_orig): - if dates_.size > 1: - idx = 1 - else: - idx = 0 - info_fmt[idx] = "%b\n%Y" - - elif span <= 2.5 * periodsperyear: - quarter_start = (dates_ % 3 == 0).nonzero() - info_maj[year_start] = True - # TODO: Check the following : is it really info['fmt'] ? - info["fmt"][quarter_start] = True - info["min"] = True - - info_fmt[quarter_start] = "%b" - info_fmt[year_start] = "%b\n%Y" - - elif span <= 4 * periodsperyear: - info_maj[year_start] = True - info["min"] = True - - jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6) - info_fmt[jan_or_jul] = "%b" - info_fmt[year_start] = "%b\n%Y" - - elif span <= 11 * periodsperyear: - quarter_start = (dates_ % 3 == 0).nonzero() - info_maj[year_start] = True - info["min"][quarter_start] = True - - info_fmt[year_start] = "%Y" - - else: - nyears = span / periodsperyear - (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) - years = dates_[year_start] // 12 + 1 - major_idx = year_start[(years % maj_anndef == 0)] - info_maj[major_idx] = True - info["min"][year_start[(years % min_anndef == 0)]] = True - - info_fmt[major_idx] = "%Y" - - return info - - -def _quarterly_finder(vmin, vmax, freq): - periodsperyear = 4 - vmin_orig = vmin - (vmin, vmax) = (int(vmin), int(vmax)) - span = vmax - vmin + 1 - - info = np.zeros( - span, dtype=[("val", int), ("maj", bool), ("min", bool), ("fmt", "|S8")] - ) - info["val"] = np.arange(vmin, vmax + 1) - info["fmt"] = "" - dates_ = info["val"] - info_maj = info["maj"] - info_fmt = info["fmt"] - year_start = (dates_ % 4 == 0).nonzero()[0] - - if span <= 3.5 * periodsperyear: - info_maj[year_start] = True - info["min"] = True - - info_fmt[:] = "Q%q" - info_fmt[year_start] = "Q%q\n%F" - if not has_level_label(year_start, vmin_orig): - if dates_.size > 1: - idx = 1 - else: - idx = 0 - info_fmt[idx] = "Q%q\n%F" - - elif span <= 11 * periodsperyear: - info_maj[year_start] = True - info["min"] = True - info_fmt[year_start] = "%F" - - else: - # https://github.com/pandas-dev/pandas/pull/47602 - years = dates_[year_start] // 4 + 1970 - nyears = span / periodsperyear - (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) - major_idx = year_start[(years % maj_anndef == 0)] - info_maj[major_idx] = True - info["min"][year_start[(years % min_anndef == 0)]] = True - info_fmt[major_idx] = "%F" - - return info - - -def _annual_finder(vmin, vmax, freq): - (vmin, vmax) = (int(vmin), int(vmax + 1)) - span = vmax - vmin + 1 - - info = np.zeros( - span, dtype=[("val", int), ("maj", bool), 
("min", bool), ("fmt", "|S8")] - ) - info["val"] = np.arange(vmin, vmax + 1) - info["fmt"] = "" - dates_ = info["val"] - - (min_anndef, maj_anndef) = _get_default_annual_spacing(span) - major_idx = dates_ % maj_anndef == 0 - info["maj"][major_idx] = True - info["min"][(dates_ % min_anndef == 0)] = True - info["fmt"][major_idx] = "%Y" - - return info - - -def get_finder(freq: BaseOffset): - # error: "BaseOffset" has no attribute "_period_dtype_code" - dtype_code = freq._period_dtype_code # type: ignore[attr-defined] - fgroup = FreqGroup.from_period_dtype_code(dtype_code) - - if fgroup == FreqGroup.FR_ANN: - return _annual_finder - elif fgroup == FreqGroup.FR_QTR: - return _quarterly_finder - elif fgroup == FreqGroup.FR_MTH: - return _monthly_finder - elif (dtype_code >= FreqGroup.FR_BUS.value) or fgroup == FreqGroup.FR_WK: - return _daily_finder - else: # pragma: no cover - raise NotImplementedError(f"Unsupported frequency: {dtype_code}") - - -class TimeSeries_DateLocator(Locator): - """ - Locates the ticks along an axis controlled by a :class:`Series`. - - Parameters - ---------- - freq : BaseOffset - Valid frequency specifier. - minor_locator : {False, True}, optional - Whether the locator is for minor ticks (True) or not. - dynamic_mode : {True, False}, optional - Whether the locator should work in dynamic mode. - base : {int}, optional - quarter : {int}, optional - month : {int}, optional - day : {int}, optional - """ - - def __init__( - self, - freq: BaseOffset, - minor_locator: bool = False, - dynamic_mode: bool = True, - base: int = 1, - quarter: int = 1, - month: int = 1, - day: int = 1, - plot_obj=None, - ) -> None: - freq = to_offset(freq) - self.freq = freq - self.base = base - (self.quarter, self.month, self.day) = (quarter, month, day) - self.isminor = minor_locator - self.isdynamic = dynamic_mode - self.offset = 0 - self.plot_obj = plot_obj - self.finder = get_finder(freq) - - def _get_default_locs(self, vmin, vmax): - """Returns the default locations of ticks.""" - if self.plot_obj.date_axis_info is None: - self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) - - locator = self.plot_obj.date_axis_info - - if self.isminor: - return np.compress(locator["min"], locator["val"]) - return np.compress(locator["maj"], locator["val"]) - - def __call__(self): - """Return the locations of the ticks.""" - # axis calls Locator.set_axis inside set_m_formatter - - vi = tuple(self.axis.get_view_interval()) - if vi != self.plot_obj.view_interval: - self.plot_obj.date_axis_info = None - self.plot_obj.view_interval = vi - vmin, vmax = vi - if vmax < vmin: - vmin, vmax = vmax, vmin - if self.isdynamic: - locs = self._get_default_locs(vmin, vmax) - else: # pragma: no cover - base = self.base - (d, m) = divmod(vmin, base) - vmin = (d + 1) * base - locs = list(range(vmin, vmax + 1, base)) - return locs - - def autoscale(self): - """ - Sets the view limits to the nearest multiples of base that contain the - data. - """ - # requires matplotlib >= 0.98.0 - (vmin, vmax) = self.axis.get_data_interval() - - locs = self._get_default_locs(vmin, vmax) - (vmin, vmax) = locs[[0, -1]] - if vmin == vmax: - vmin -= 1 - vmax += 1 - return nonsingular(vmin, vmax) - - -# ------------------------------------------------------------------------- -# --- Formatter --- -# ------------------------------------------------------------------------- - - -class TimeSeries_DateFormatter(Formatter): - """ - Formats the ticks along an axis controlled by a :class:`PeriodIndex`. 
- - Parameters - ---------- - freq : BaseOffset - Valid frequency specifier. - minor_locator : bool, default False - Whether the current formatter should apply to minor ticks (True) or - major ticks (False). - dynamic_mode : bool, default True - Whether the formatter works in dynamic mode or not. - """ - - def __init__( - self, - freq: BaseOffset, - minor_locator: bool = False, - dynamic_mode: bool = True, - plot_obj=None, - ) -> None: - freq = to_offset(freq) - self.format = None - self.freq = freq - self.locs: list[Any] = [] # unused, for matplotlib compat - self.formatdict: dict[Any, Any] | None = None - self.isminor = minor_locator - self.isdynamic = dynamic_mode - self.offset = 0 - self.plot_obj = plot_obj - self.finder = get_finder(freq) - - def _set_default_format(self, vmin, vmax): - """Returns the default ticks spacing.""" - if self.plot_obj.date_axis_info is None: - self.plot_obj.date_axis_info = self.finder(vmin, vmax, self.freq) - info = self.plot_obj.date_axis_info - - if self.isminor: - format = np.compress(info["min"] & np.logical_not(info["maj"]), info) - else: - format = np.compress(info["maj"], info) - self.formatdict = {x: f for (x, _, _, f) in format} - return self.formatdict - - def set_locs(self, locs) -> None: - """Sets the locations of the ticks""" - # don't actually use the locs. This is just needed to work with - # matplotlib. Force to use vmin, vmax - - self.locs = locs - - (vmin, vmax) = vi = tuple(self.axis.get_view_interval()) - if vi != self.plot_obj.view_interval: - self.plot_obj.date_axis_info = None - self.plot_obj.view_interval = vi - if vmax < vmin: - (vmin, vmax) = (vmax, vmin) - self._set_default_format(vmin, vmax) - - def __call__(self, x, pos: int = 0) -> str: - if self.formatdict is None: - return "" - else: - fmt = self.formatdict.pop(x, "") - if isinstance(fmt, np.bytes_): - fmt = fmt.decode("utf-8") - with warnings.catch_warnings(): - warnings.filterwarnings( - "ignore", - "Period with BDay freq is deprecated", - category=FutureWarning, - ) - period = Period(ordinal=int(x), freq=self.freq) - assert isinstance(period, Period) - return period.strftime(fmt) - - -class TimeSeries_TimedeltaFormatter(Formatter): - """ - Formats the ticks along an axis controlled by a :class:`TimedeltaIndex`. 
- """ - - @staticmethod - def format_timedelta_ticks(x, pos, n_decimals: int) -> str: - """ - Convert seconds to 'D days HH:MM:SS.F' - """ - s, ns = divmod(x, 10**9) - m, s = divmod(s, 60) - h, m = divmod(m, 60) - d, h = divmod(h, 24) - decimals = int(ns * 10 ** (n_decimals - 9)) - s = f"{int(h):02d}:{int(m):02d}:{int(s):02d}" - if n_decimals > 0: - s += f".{decimals:0{n_decimals}d}" - if d != 0: - s = f"{int(d):d} days {s}" - return s - - def __call__(self, x, pos: int = 0) -> str: - (vmin, vmax) = tuple(self.axis.get_view_interval()) - n_decimals = min(int(np.ceil(np.log10(100 * 10**9 / abs(vmax - vmin)))), 9) - return self.format_timedelta_ticks(x, pos, n_decimals) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/apply/common.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/apply/common.py deleted file mode 100644 index b4d153df54059ca2a82f336e19afb4297eb218a2..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/apply/common.py +++ /dev/null @@ -1,7 +0,0 @@ -from pandas.core.groupby.base import transformation_kernels - -# There is no Series.cumcount or DataFrame.cumcount -series_transform_kernels = [ - x for x in sorted(transformation_kernels) if x != "cumcount" -] -frame_transform_kernels = [x for x in sorted(transformation_kernels) if x != "cumcount"] diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_mask.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_mask.py deleted file mode 100644 index 264e27c9c122ebb6d59c5b16531ebbdc8ce51320..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/frame/indexing/test_mask.py +++ /dev/null @@ -1,152 +0,0 @@ -""" -Tests for DataFrame.mask; tests DataFrame.where as a side-effect. 
-""" - -import numpy as np - -from pandas import ( - NA, - DataFrame, - Float64Dtype, - Series, - StringDtype, - Timedelta, - isna, -) -import pandas._testing as tm - - -class TestDataFrameMask: - def test_mask(self): - df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) - cond = df > 0 - - rs = df.where(cond, np.nan) - tm.assert_frame_equal(rs, df.mask(df <= 0)) - tm.assert_frame_equal(rs, df.mask(~cond)) - - other = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) - rs = df.where(cond, other) - tm.assert_frame_equal(rs, df.mask(df <= 0, other)) - tm.assert_frame_equal(rs, df.mask(~cond, other)) - - def test_mask2(self): - # see GH#21891 - df = DataFrame([1, 2]) - res = df.mask([[True], [False]]) - - exp = DataFrame([np.nan, 2]) - tm.assert_frame_equal(res, exp) - - def test_mask_inplace(self): - # GH#8801 - df = DataFrame(np.random.default_rng(2).standard_normal((5, 3))) - cond = df > 0 - - rdf = df.copy() - - return_value = rdf.where(cond, inplace=True) - assert return_value is None - tm.assert_frame_equal(rdf, df.where(cond)) - tm.assert_frame_equal(rdf, df.mask(~cond)) - - rdf = df.copy() - return_value = rdf.where(cond, -df, inplace=True) - assert return_value is None - tm.assert_frame_equal(rdf, df.where(cond, -df)) - tm.assert_frame_equal(rdf, df.mask(~cond, -df)) - - def test_mask_edge_case_1xN_frame(self): - # GH#4071 - df = DataFrame([[1, 2]]) - res = df.mask(DataFrame([[True, False]])) - expec = DataFrame([[np.nan, 2]]) - tm.assert_frame_equal(res, expec) - - def test_mask_callable(self): - # GH#12533 - df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) - result = df.mask(lambda x: x > 4, lambda x: x + 1) - exp = DataFrame([[1, 2, 3], [4, 6, 7], [8, 9, 10]]) - tm.assert_frame_equal(result, exp) - tm.assert_frame_equal(result, df.mask(df > 4, df + 1)) - - # return ndarray and scalar - result = df.mask(lambda x: (x % 2 == 0).values, lambda x: 99) - exp = DataFrame([[1, 99, 3], [99, 5, 99], [7, 99, 9]]) - tm.assert_frame_equal(result, exp) - tm.assert_frame_equal(result, df.mask(df % 2 == 0, 99)) - - # chain - result = (df + 2).mask(lambda x: x > 8, lambda x: x + 10) - exp = DataFrame([[3, 4, 5], [6, 7, 8], [19, 20, 21]]) - tm.assert_frame_equal(result, exp) - tm.assert_frame_equal(result, (df + 2).mask((df + 2) > 8, (df + 2) + 10)) - - def test_mask_dtype_bool_conversion(self): - # GH#3733 - df = DataFrame(data=np.random.default_rng(2).standard_normal((100, 50))) - df = df.where(df > 0) # create nans - bools = df > 0 - mask = isna(df) - expected = bools.astype(object).mask(mask) - result = bools.mask(mask) - tm.assert_frame_equal(result, expected) - - -def test_mask_stringdtype(frame_or_series): - # GH 40824 - obj = DataFrame( - {"A": ["foo", "bar", "baz", NA]}, - index=["id1", "id2", "id3", "id4"], - dtype=StringDtype(), - ) - filtered_obj = DataFrame( - {"A": ["this", "that"]}, index=["id2", "id3"], dtype=StringDtype() - ) - expected = DataFrame( - {"A": [NA, "this", "that", NA]}, - index=["id1", "id2", "id3", "id4"], - dtype=StringDtype(), - ) - if frame_or_series is Series: - obj = obj["A"] - filtered_obj = filtered_obj["A"] - expected = expected["A"] - - filter_ser = Series([False, True, True, False]) - result = obj.mask(filter_ser, filtered_obj) - - tm.assert_equal(result, expected) - - -def test_mask_where_dtype_timedelta(): - # https://github.com/pandas-dev/pandas/issues/39548 - df = DataFrame([Timedelta(i, unit="d") for i in range(5)]) - - expected = DataFrame(np.full(5, np.nan, dtype="timedelta64[ns]")) - tm.assert_frame_equal(df.mask(df.notna()), 
expected) - - expected = DataFrame( - [np.nan, np.nan, np.nan, Timedelta("3 day"), Timedelta("4 day")] - ) - tm.assert_frame_equal(df.where(df > Timedelta(2, unit="d")), expected) - - -def test_mask_return_dtype(): - # GH#50488 - ser = Series([0.0, 1.0, 2.0, 3.0], dtype=Float64Dtype()) - cond = ~ser.isna() - other = Series([True, False, True, False]) - excepted = Series([1.0, 0.0, 1.0, 0.0], dtype=ser.dtype) - result = ser.mask(cond, other) - tm.assert_series_equal(result, excepted) - - -def test_mask_inplace_no_other(): - # GH#51685 - df = DataFrame({"a": [1.0, 2.0], "b": ["x", "y"]}) - cond = DataFrame({"a": [True, False], "b": [False, True]}) - df.mask(cond, inplace=True) - expected = DataFrame({"a": [np.nan, 2], "b": ["x", np.nan]}) - tm.assert_frame_equal(df, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_size.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_size.py deleted file mode 100644 index 20a454996fa4488501d6f623ad3afc6fa38e5634..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/series/methods/test_size.py +++ /dev/null @@ -1,22 +0,0 @@ -import pytest - -from pandas import Series - - -@pytest.mark.parametrize( - "data, index, expected", - [ - ([1, 2, 3], None, 3), - ({"a": 1, "b": 2, "c": 3}, None, 3), - ([1, 2, 3], ["x", "y", "z"], 3), - ([1, 2, 3, 4, 5], ["x", "y", "z", "w", "n"], 5), - ([1, 2, 3], None, 3), - ([1, 2, 3], ["x", "y", "z"], 3), - ([1, 2, 3, 4], ["x", "y", "z", "w"], 4), - ], -) -def test_series(data, index, expected): - # GH#52897 - ser = Series(data, index=index) - assert ser.size == expected - assert isinstance(ser.size, int) diff --git a/spaces/pseudolab/Finetune-Model/README.md b/spaces/pseudolab/Finetune-Model/README.md deleted file mode 100644 index 2ab4aebe218676d37a6fb194259d911ab2f49b79..0000000000000000000000000000000000000000 --- a/spaces/pseudolab/Finetune-Model/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: AutoTrain Advanced -emoji: 🚀 -colorFrom: blue -colorTo: green -sdk: docker -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/pytorch/Transformer_NMT/README.md b/spaces/pytorch/Transformer_NMT/README.md deleted file mode 100644 index 84f67b7380eb0e6dddec1ad31e3bcf714982c316..0000000000000000000000000000000000000000 --- a/spaces/pytorch/Transformer_NMT/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Transformer_NMT -emoji: 🐢 -colorFrom: pink -colorTo: indigo -sdk: gradio -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/qingxu98/gpt-academic/themes/green.css b/spaces/qingxu98/gpt-academic/themes/green.css deleted file mode 100644 index dd109d53fda81949834f74d767c77940709d557c..0000000000000000000000000000000000000000 --- a/spaces/qingxu98/gpt-academic/themes/green.css +++ /dev/null @@ -1,831 +0,0 @@ -:root { - --chatbot-color-light: #000000; - --chatbot-color-dark: #FFFFFF; - --chatbot-background-color-light: #F3F3F3; - --chatbot-background-color-dark: #121111; - --message-user-background-color-light: #95EC69; - --message-user-background-color-dark: #26B561; - --message-bot-background-color-light: #FFFFFF; - --message-bot-background-color-dark: #2C2C2C; -} -mspace { - display: block; -} -@media only screen and (max-width: 767px) { - 
#column_1 { - display: none !important; - } -} -@keyframes highlight { - 0%, 100% { - border: 2px solid transparent; - } - 50% { - border-color: yellow; - } -} -.normal_mut_select .svelte-1gfkn6j { - float: left; - width: auto; - line-height: 260% !important; -} -#highlight_update { - animation-name: highlight; - animation-duration: 0.75s; - animation-iteration-count: 3; -} - -.table-wrap.svelte-13hsdno.svelte-13hsdno.svelte-13hsdno { - border: 0px solid var(--border-color-primary) !important; -} - -#examples_col { - z-index: 2; - position: absolute; - bottom: 0; - left: 0; - width: 100%; - margin-bottom: 30% !important; -} -#hide_examples { - z-index: 0; -} - -#debug_mes { - position: absolute; - display: flex; - bottom: 0; - left: 0; - z-index: 1; /* set a higher z-index value */ - margin-bottom: -4px !important; - align-self: flex-end; -} -#chat_box { - display: flex; - flex-direction: column; - overflow-y: visible !important; - z-index: 3; - flex-grow: 1; /* automatically fill the remaining space */ - position: absolute; - bottom: 0; - left: 0; - width: 100%; - margin-bottom: 30px !important; - border: 1px solid var(--border-color-primary); -} -.toast-body { - z-index: 5 !important; -} -.chat_input { - -} -.sm_btn { - position: relative; - bottom: 5px; - height: 10%; - border-radius: 20px!important; - min-width: min(10%,100%) !important; - overflow: hidden; -} -.sm_select { - position: relative !important; - z-index: 5 !important; - bottom: 5px; - min-width: min(20%,100%) !important; - border-radius: 20px!important; -} -.sm_checkbox { - position: relative !important; - z-index: 5 !important; - bottom: 5px; - padding: 0 !important; -} -.sm_select .wrap-inner.svelte-aqlk7e.svelte-aqlk7e.svelte-aqlk7e { - padding: 0 !important; -} -.sm_select .block.svelte-mppz8v { - width: 10% !important; -} - -button.sm { - padding: 6px 8px !important; -} - -/* usage_display */ -.insert_block { - position: relative; - bottom: 2px; - min-width: min(55px,100%) !important; -} - -.submit_btn { - flex-direction: column-reverse; - overflow-y: auto !important; - position: absolute; - bottom: 0; - right: 10px; - margin-bottom: 10px !important; - min-width: min(50px,100%) !important; -} - -textarea { - resize: none; - height: 100%; /* fill the parent element's height */ -} -/* #main_chatbot { - height: 75vh !important; - max-height: 75vh !important; - overflow: auto !important; - z-index: 2; - transform: translateZ(0) !important; - backface-visibility: hidden !important; - will-change: transform !important; -} */ -#prompt_result{ - height: 60vh !important; - max-height: 60vh !important; -} - -#app_title { - font-weight: var(--prose-header-text-weight); - font-size: var(--text-xxl); - line-height: 1.3; - text-align: left; - margin-top: 6px; - white-space: nowrap; -} -#description { - text-align: center; - margin: 32px 0 4px 0; -} - -/* gradio footer info */ -footer { - /* display: none !important; */ - margin-top: .2em !important; - font-size: 85%; -} -#footer { - text-align: center; -} -#footer div { - display: inline-block; -} -#footer .versions{ - font-size: 85%; - opacity: 0.60; -} -/* user_info */ - -#float_display { - position: absolute; - max-height: 30px; -} -/* user_info */ -#user_info { - white-space: nowrap; - position: absolute; left: 8em; top: .2em; - z-index: var(--layer-2); - box-shadow: var(--block-shadow); - border: none; border-radius: var(--block-label-radius); - background: var(--color-accent); - padding: var(--block-label-padding); - font-size: var(--block-label-text-size); line-height: var(--line-sm); - width: auto; min-height: 30px !important; - opacity: 1; -
transition: opacity 0.3s ease-in-out; -} -textarea.svelte-1pie7s6 { - background: #e7e6e6 !important; - width: 96% !important; -} - -.dark textarea.svelte-1pie7s6 { - background: var(--input-background-fill) !important; - width: 96% !important; -} - -.dark input[type=number].svelte-1cl284s { - background: #393939 !important; - border: var(--input-border-width) solid var(--input-border-color) !important; -} -/* .dark input[type="range"] { - background: #393939 !important; -} */ -#user_info .wrap { - opacity: 0; -} -#user_info p { - color: white; - font-weight: var(--block-label-text-weight); -} -#user_info.hideK { - opacity: 0; - transition: opacity 1s ease-in-out; -} -[class *= "message"] { - gap: 7px !important; - border-radius: var(--radius-xl) !important -} -/* debug_mes */ -#debug_mes { - min-height: 2em; - align-items: flex-end; - justify-content: flex-end; -} -#debug_mes p { - font-size: .85em; - font-family: ui-monospace, "SF Mono", "SFMono-Regular", "Menlo", "Consolas", "Liberation Mono", "Microsoft Yahei UI", "Microsoft Yahei", monospace; - /* Windows下中文的monospace会fallback为新宋体,实在太丑,这里折中使用微软雅黑 */ - color: #000000; -} -.dark #debug_mes p { - color: #ee65ed; -} - -#debug_mes { - transition: all 0.6s; -} -#gpt-chatbot { - transition: height 0.3s ease; -} - -/* .wrap.svelte-18telvq.svelte-18telvq { - padding: var(--block-padding) !important; - height: 100% !important; - max-height: 95% !important; - overflow-y: auto !important; -}*/ -.app.svelte-1mya07g.svelte-1mya07g { - max-width: 100%; - position: relative; - padding: var(--size-4); - width: 100%; - height: 100%; -} - -.gradio-container-3-32-2 h1 { - font-weight: 700 !important; - font-size: 28px !important; -} - - -.gradio-container-3-32-2 h2 { - font-weight: 600 !important; - font-size: 24px !important; -} -.gradio-container-3-32-2 h3 { - font-weight: 500 !important; - font-size: 20px !important; -} -.gradio-container-3-32-2 h4 { - font-weight: 400 !important; - font-size: 16px !important; -} -.gradio-container-3-32-2 h5 { - font-weight: 300 !important; - font-size: 14px !important; -} -.gradio-container-3-32-2 h6 { - font-weight: 200 !important; - font-size: 12px !important; -} - - -#usage_display p, #usage_display span { - margin: 0; - font-size: .85em; - color: var(--body-text-color-subdued); -} -.progress-bar { - background-color: var(--input-background-fill);; - margin: .5em 0 !important; - height: 20px; - border-radius: 10px; - overflow: hidden; -} -.progress { - background-color: var(--block-title-background-fill); - height: 100%; - border-radius: 10px; - text-align: right; - transition: width 0.5s ease-in-out; -} -.progress-text { - /* color: white; */ - color: var(--color-accent) !important; - font-size: 1em !important; - font-weight: bold; - padding-right: 10px; - line-height: 20px; -} - -.apSwitch { - top: 2px; - display: inline-block; - height: 24px; - position: relative; - width: 48px; - border-radius: 12px; -} -.apSwitch input { - display: none !important; -} -.apSlider { - background-color: var(--neutral-200); - bottom: 0; - cursor: pointer; - left: 0; - position: absolute; - right: 0; - top: 0; - transition: .4s; - font-size: 18px; - border-radius: 7px; -} -.apSlider::before { - bottom: -1.5px; - left: 1px; - position: absolute; - transition: .4s; - content: "🌞"; -} -hr.append-display { - margin: 8px 0; - border: none; - height: 1px; - border-top-width: 0; - background-image: linear-gradient(to right, rgba(50,50,50, 0.1), rgba(150, 150, 150, 0.8), rgba(50,50,50, 0.1)); -} -.source-a { - font-size: 0.8em; - 
max-width: 100%; - margin: 0; - display: flex; - flex-direction: row; - flex-wrap: wrap; - align-items: center; - /* background-color: #dddddd88; */ - border-radius: 1.5rem; - padding: 0.2em; -} -.source-a a { - display: inline-block; - background-color: #aaaaaa50; - border-radius: 1rem; - padding: 0.5em; - text-align: center; - text-overflow: ellipsis; - overflow: hidden; - min-width: 20%; - white-space: nowrap; - margin: 0.2rem 0.1rem; - text-decoration: none !important; - flex: 1; - transition: flex 0.5s; -} -.source-a a:hover { - background-color: #aaaaaa20; - flex: 2; -} -input:checked + .apSlider { - background-color: var(--primary-600); -} -input:checked + .apSlider::before { - transform: translateX(23px); - content:"🌚"; -} - -/* Override Slider Styles (for webkit browsers like Safari and Chrome) - * 好希望这份提案能早日实现 https://github.com/w3c/csswg-drafts/issues/4410 - * 进度滑块在各个平台还是太不统一了 - */ -input[type="range"] { - -webkit-appearance: none; - height: 4px; - background: var(--input-background-fill); - border-radius: 5px; - background-image: linear-gradient(var(--primary-500),var(--primary-500)); - background-size: 0% 100%; - background-repeat: no-repeat; -} -input[type="range"]::-webkit-slider-thumb { - -webkit-appearance: none; - height: 20px; - width: 20px; - border-radius: 50%; - border: solid 0.5px #ddd; - background-color: white; - cursor: ew-resize; - box-shadow: var(--input-shadow); - transition: background-color .1s ease; -} -input[type="range"]::-webkit-slider-thumb:hover { - background: var(--neutral-50); -} -input[type="range"]::-webkit-slider-runnable-track { - -webkit-appearance: none; - box-shadow: none; - border: none; - background: transparent; -} - -.submit_btn, #cancel_btn { - height: 42px !important; -} -.submit_btn::before { - content: url("data:image/svg+xml, %3Csvg width='21px' height='20px' viewBox='0 0 21 20' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='page' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cg id='send' transform='translate(0.435849, 0.088463)' fill='%23FFFFFF' fill-rule='nonzero'%3E %3Cpath d='M0.579148261,0.0428666046 C0.301105539,-0.0961547561 -0.036517765,0.122307382 0.0032026237,0.420210298 L1.4927172,18.1553639 C1.5125774,18.4334066 1.79062012,18.5922882 2.04880264,18.4929872 L8.24518329,15.8913017 L11.6412765,19.7441794 C11.8597387,19.9825018 12.2370824,19.8832008 12.3165231,19.5852979 L13.9450591,13.4882182 L19.7839562,11.0255541 C20.0619989,10.8865327 20.0818591,10.4694687 19.7839562,10.3105871 L0.579148261,0.0428666046 Z M11.6138902,17.0883151 L9.85385903,14.7195502 L0.718169621,0.618812241 L12.69945,12.9346347 L11.6138902,17.0883151 Z' id='shape'%3E%3C/path%3E %3C/g%3E %3C/g%3E %3C/svg%3E"); - height: 21px; -} - -#cancel_btn::before { - content: url("data:image/svg+xml,%3Csvg width='21px' height='21px' viewBox='0 0 21 21' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='pg' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cpath d='M10.2072007,20.088463 C11.5727865,20.088463 12.8594566,19.8259823 14.067211,19.3010209 C15.2749653,18.7760595 16.3386126,18.0538087 17.2581528,17.1342685 C18.177693,16.2147282 18.8982283,15.1527965 19.4197586,13.9484733 C19.9412889,12.7441501 20.202054,11.4557644 20.202054,10.0833163 C20.202054,8.71773046 19.9395733,7.43106036 19.4146119,6.22330603 C18.8896505,5.01555169 18.1673997,3.95018885 17.2478595,3.0272175 C16.3283192,2.10424615 15.2646719,1.3837109 
14.0569176,0.865611739 C12.8491633,0.34751258 11.5624932,0.088463 10.1969073,0.088463 C8.83132146,0.088463 7.54636692,0.34751258 6.34204371,0.865611739 C5.1377205,1.3837109 4.07407321,2.10424615 3.15110186,3.0272175 C2.22813051,3.95018885 1.5058797,5.01555169 0.984349419,6.22330603 C0.46281914,7.43106036 0.202054,8.71773046 0.202054,10.0833163 C0.202054,11.4557644 0.4645347,12.7441501 0.9894961,13.9484733 C1.5144575,15.1527965 2.23670831,16.2147282 3.15624854,17.1342685 C4.07578877,18.0538087 5.1377205,18.7760595 6.34204371,19.3010209 C7.54636692,19.8259823 8.83475258,20.088463 10.2072007,20.088463 Z M10.2072007,18.2562448 C9.07493099,18.2562448 8.01471483,18.0452309 7.0265522,17.6232031 C6.03838956,17.2011753 5.17031614,16.6161693 4.42233192,15.8681851 C3.6743477,15.1202009 3.09105726,14.2521274 2.67246059,13.2639648 C2.25386392,12.2758022 2.04456558,11.215586 2.04456558,10.0833163 C2.04456558,8.95104663 2.25386392,7.89083047 2.67246059,6.90266784 C3.09105726,5.9145052 3.6743477,5.04643178 4.42233192,4.29844756 C5.17031614,3.55046334 6.036674,2.9671729 7.02140552,2.54857623 C8.00613703,2.12997956 9.06463763,1.92068122 10.1969073,1.92068122 C11.329177,1.92068122 12.3911087,2.12997956 13.3827025,2.54857623 C14.3742962,2.9671729 15.2440852,3.55046334 15.9920694,4.29844756 C16.7400537,5.04643178 17.3233441,5.9145052 17.7419408,6.90266784 C18.1605374,7.89083047 18.3698358,8.95104663 18.3698358,10.0833163 C18.3698358,11.215586 18.1605374,12.2758022 17.7419408,13.2639648 C17.3233441,14.2521274 16.7400537,15.1202009 15.9920694,15.8681851 C15.2440852,16.6161693 14.3760118,17.2011753 13.3878492,17.6232031 C12.3996865,18.0452309 11.3394704,18.2562448 10.2072007,18.2562448 Z M7.65444721,13.6242324 L12.7496608,13.6242324 C13.0584616,13.6242324 13.3003556,13.5384544 13.4753427,13.3668984 C13.6503299,13.1953424 13.7378234,12.9585951 13.7378234,12.6566565 L13.7378234,7.49968276 C13.7378234,7.19774418 13.6503299,6.96099688 13.4753427,6.78944087 C13.3003556,6.61788486 13.0584616,6.53210685 12.7496608,6.53210685 L7.65444721,6.53210685 C7.33878414,6.53210685 7.09345904,6.61788486 6.91847191,6.78944087 C6.74348478,6.96099688 6.65599121,7.19774418 6.65599121,7.49968276 L6.65599121,12.6566565 C6.65599121,12.9585951 6.74348478,13.1953424 6.91847191,13.3668984 C7.09345904,13.5384544 7.33878414,13.6242324 7.65444721,13.6242324 Z' id='shape' fill='%23FF3B30' fill-rule='nonzero'%3E%3C/path%3E %3C/g%3E %3C/svg%3E"); - height: 21px; -} -/* list */ -ol:not(.options), ul:not(.options) { - padding-inline-start: 2em !important; -} - -/* 亮色(默认) */ -#gpt-chatbot { - background-color: var(--chatbot-background-color-light) !important; - color: var(--chatbot-color-light) !important; - box-shadow: 0 0 12px 4px rgba(0, 0, 0, 0.06); -} -/* 暗色 */ -.dark #gpt-chatbot { - background-color: var(--block-background-fill) !important; - color: var(--chatbot-color-dark) !important; - box-shadow: 0 0 12px 4px rgba(0, 0, 0, 0.2); -} - -#gpt-panel > div { - box-shadow: 0 0 12px 4px rgba(0, 0, 0, 0.06); -} -.dark #gpt-panel > div { - box-shadow: 0 0 12px 4px rgba(0, 0, 0, 0.2); -} - -/* 屏幕宽度大于等于500px的设备 */ -/* update on 2023.4.8: 高度的细致调整已写入JavaScript */ -/* @media screen and (min-width: 500px) { - #main_chatbot { - height: calc(100vh - 200px); - } - #main_chatbot .wrap { - max-height: calc(100vh - 200px - var(--line-sm)*1rem - 2*var(--block-label-margin) ); - } -} */ -/* 屏幕宽度小于500px的设备 */ -/* @media screen and (max-width: 499px) { - #main_chatbot { - height: calc(100vh - 140px); - } - #main_chatbot .wrap { - max-height: calc(100vh - 140px - 
var(--line-sm)*1rem - 2*var(--block-label-margin) ); - } - [data-testid = "bot"] { - max-width: 95% !important; - } - #app_title h1{ - letter-spacing: -1px; font-size: 22px; - } -} */ -#gpt-chatbot .wrap { - overflow-x: hidden -} -/* chat bubbles */ -.message { - border-radius: var(--radius-xl) !important; - border: none; - padding: var(--spacing-xl) !important; - font-size: 15px !important; - line-height: var(--line-md) !important; - min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); - min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl)); -} -[data-testid = "bot"] { - max-width: 85%; - border-bottom-left-radius: 0 !important; - background-color: var(--message-bot-background-color-light) !important; -} -[data-testid = "user"] { - max-width: 85%; - width: auto !important; - border-bottom-right-radius: 0 !important; - background-color: var(--message-user-background-color-light) !important; -} -.dark [data-testid = "bot"] { - background-color: var(--message-bot-background-color-dark) !important; -} -.dark [data-testid = "user"] { - background-color: var(--message-user-background-color-dark) !important; -} - -.message p { - margin-top: 0.6em !important; - margin-bottom: 0.6em !important; -} -.message p:first-child { margin-top: 0 !important; } -.message p:last-of-type { margin-bottom: 0 !important; } - -.message .md-message { - display: block; - padding: 0 !important; -} -.message .raw-message { - display: block; - padding: 0 !important; - white-space: pre-wrap; -} -.raw-message.hideM, .md-message.hideM { - display: none; -} - -/* custom buttons */ -.chuanhu-btn { - border-radius: 5px; - /* background-color: #E6E6E6 !important; */ - color: rgba(120, 120, 120, 0.64) !important; - padding: 4px !important; - position: absolute; - right: -22px; - cursor: pointer !important; - transition: color .2s ease, background-color .2s ease; -} -.chuanhu-btn:hover { - background-color: rgba(167, 167, 167, 0.25) !important; - color: unset !important; -} -.chuanhu-btn:active { - background-color: rgba(167, 167, 167, 0.5) !important; -} -.chuanhu-btn:focus { - outline: none; -} -.copy-bot-btn { - /* top: 18px; */ - bottom: 0; -} -.toggle-md-btn { - /* top: 0; */ - bottom: 20px; -} -.copy-code-btn { - position: relative; - float: right; - font-size: 1em; - cursor: pointer; -} - -.message-wrap>div img{ - border-radius: 10px !important; -} - -/* history message */ -.wrap>.history-message { - padding: 10px !important; -} -.history-message { - /* padding: 0 !important; */ - opacity: 80%; - display: flex; - flex-direction: column; -} -.history-message>.history-message { - padding: 0 !important; -} -.history-message>.message-wrap { - padding: 0 !important; - margin-bottom: 16px; -} -.history-message>.message { - margin-bottom: 16px; -} -.wrap>.history-message::after { - content: ""; - display: block; - height: 2px; - background-color: var(--body-text-color-subdued); - margin-bottom: 10px; - margin-top: -10px; - clear: both; -} -.wrap>.history-message>:last-child::after { - content: "仅供查看"; - display: block; - text-align: center; - color: var(--body-text-color-subdued); - font-size: 0.8em; -} - -/* tables */ -table { - margin: 1em 0; - border-collapse: collapse; - empty-cells: show; -} -td,th { - border: 1.2px solid var(--border-color-primary) !important; - padding: 0.2em; -} -thead { - background-color: rgba(175,184,193,0.2); -} -thead th { - padding: .5em .2em; -} -/* inline code */ -.message :not(pre) code { - display: inline; - white-space: break-spaces; - border-radius: 6px; - margin: 0 2px 0 2px;
- padding: .2em .4em .1em .4em; - background-color: rgba(175,184,193,0.2); -} -/* code blocks */ -.message pre code { - display: block; - overflow: auto; - white-space: pre; - background-color: hsla(0, 0%, 7%, 70%)!important; - border-radius: 10px; - padding: 1.2em 1em 0em .5em; - margin: 0.6em 2em 1em 0.2em; - color: #FFF; - box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2); -} -.dark .message pre code { - background-color: hsla(0, 0%, 20%, 300%)!important; -} -.message pre { - padding: 0 !important; -} -.message pre code div.highlight { - background-color: unset !important; -} - -button.copy-button { - display: none; -} - -/* code highlighting styles */ -.codehilite .hll { background-color: #6e7681 } -.codehilite .c { color: #8b949e; font-style: italic } /* Comment */ -.codehilite .err { color: #f85149 } /* Error */ -.codehilite .esc { color: #c9d1d9 } /* Escape */ -.codehilite .g { color: #c9d1d9 } /* Generic */ -.codehilite .k { color: #ff7b72 } /* Keyword */ -.codehilite .l { color: #a5d6ff } /* Literal */ -.codehilite .n { color: #c9d1d9 } /* Name */ -.codehilite .o { color: #ff7b72; font-weight: bold } /* Operator */ -.codehilite .x { color: #c9d1d9 } /* Other */ -.codehilite .p { color: #c9d1d9 } /* Punctuation */ -.codehilite .ch { color: #8b949e; font-style: italic } /* Comment.Hashbang */ -.codehilite .cm { color: #8b949e; font-style: italic } /* Comment.Multiline */ -.codehilite .cp { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Preproc */ -.codehilite .cpf { color: #8b949e; font-style: italic } /* Comment.PreprocFile */ -.codehilite .c1 { color: #8b949e; font-style: italic } /* Comment.Single */ -.codehilite .cs { color: #8b949e; font-weight: bold; font-style: italic } /* Comment.Special */ -.codehilite .gd { color: #ffa198; background-color: #490202 } /* Generic.Deleted */ -.codehilite .ge { color: #c9d1d9; font-style: italic } /* Generic.Emph */ -.codehilite .gr { color: #ffa198 } /* Generic.Error */ -.codehilite .gh { color: #79c0ff; font-weight: bold } /* Generic.Heading */ -.codehilite .gi { color: #56d364; background-color: #0f5323 } /* Generic.Inserted */ -.codehilite .go { color: #8b949e } /* Generic.Output */ -.codehilite .gp { color: #8b949e } /* Generic.Prompt */ -.codehilite .gs { color: #c9d1d9; font-weight: bold } /* Generic.Strong */ -.codehilite .gu { color: #79c0ff } /* Generic.Subheading */ -.codehilite .gt { color: #ff7b72 } /* Generic.Traceback */ -.codehilite .g-Underline { color: #c9d1d9; text-decoration: underline } /* Generic.Underline */ -.codehilite .kc { color: #79c0ff } /* Keyword.Constant */ -.codehilite .kd { color: #ff7b72 } /* Keyword.Declaration */ -.codehilite .kn { color: #ff7b72 } /* Keyword.Namespace */ -.codehilite .kp { color: #79c0ff } /* Keyword.Pseudo */ -.codehilite .kr { color: #ff7b72 } /* Keyword.Reserved */ -.codehilite .kt { color: #ff7b72 } /* Keyword.Type */ -.codehilite .ld { color: #79c0ff } /* Literal.Date */ -.codehilite .m { color: #a5d6ff } /* Literal.Number */ -.codehilite .s { color: #a5d6ff } /* Literal.String */ -.codehilite .na { color: #c9d1d9 } /* Name.Attribute */ -.codehilite .nb { color: #c9d1d9 } /* Name.Builtin */ -.codehilite .nc { color: #f0883e; font-weight: bold } /* Name.Class */ -.codehilite .no { color: #79c0ff; font-weight: bold } /* Name.Constant */ -.codehilite .nd { color: #d2a8ff; font-weight: bold } /* Name.Decorator */ -.codehilite .ni { color: #ffa657 } /* Name.Entity */ -.codehilite .ne { color: #f0883e; font-weight: bold } /* Name.Exception */ -.codehilite .nf { color: #d2a8ff; font-weight: bold } /* 
Name.Function */ -.codehilite .nl { color: #79c0ff; font-weight: bold } /* Name.Label */ -.codehilite .nn { color: #ff7b72 } /* Name.Namespace */ -.codehilite .nx { color: #c9d1d9 } /* Name.Other */ -.codehilite .py { color: #79c0ff } /* Name.Property */ -.codehilite .nt { color: #7ee787 } /* Name.Tag */ -.codehilite .nv { color: #79c0ff } /* Name.Variable */ -.codehilite .ow { color: #ff7b72; font-weight: bold } /* Operator.Word */ -.codehilite .pm { color: #c9d1d9 } /* Punctuation.Marker */ -.codehilite .w { color: #6e7681 } /* Text.Whitespace */ -.codehilite .mb { color: #a5d6ff } /* Literal.Number.Bin */ -.codehilite .mf { color: #a5d6ff } /* Literal.Number.Float */ -.codehilite .mh { color: #a5d6ff } /* Literal.Number.Hex */ -.codehilite .mi { color: #a5d6ff } /* Literal.Number.Integer */ -.codehilite .mo { color: #a5d6ff } /* Literal.Number.Oct */ -.codehilite .sa { color: #79c0ff } /* Literal.String.Affix */ -.codehilite .sb { color: #a5d6ff } /* Literal.String.Backtick */ -.codehilite .sc { color: #a5d6ff } /* Literal.String.Char */ -.codehilite .dl { color: #79c0ff } /* Literal.String.Delimiter */ -.codehilite .sd { color: #a5d6ff } /* Literal.String.Doc */ -.codehilite .s2 { color: #a5d6ff } /* Literal.String.Double */ -.codehilite .se { color: #79c0ff } /* Literal.String.Escape */ -.codehilite .sh { color: #79c0ff } /* Literal.String.Heredoc */ -.codehilite .si { color: #a5d6ff } /* Literal.String.Interpol */ -.codehilite .sx { color: #a5d6ff } /* Literal.String.Other */ -.codehilite .sr { color: #79c0ff } /* Literal.String.Regex */ -.codehilite .s1 { color: #a5d6ff } /* Literal.String.Single */ -.codehilite .ss { color: #a5d6ff } /* Literal.String.Symbol */ -.codehilite .bp { color: #c9d1d9 } /* Name.Builtin.Pseudo */ -.codehilite .fm { color: #d2a8ff; font-weight: bold } /* Name.Function.Magic */ -.codehilite .vc { color: #79c0ff } /* Name.Variable.Class */ -.codehilite .vg { color: #79c0ff } /* Name.Variable.Global */ -.codehilite .vi { color: #79c0ff } /* Name.Variable.Instance */ -.codehilite .vm { color: #79c0ff } /* Name.Variable.Magic */ -.codehilite .il { color: #a5d6ff } /* Literal.Number.Integer.Long */ - -.dark .codehilite .hll { background-color: #2C3B41 } -.dark .codehilite .c { color: #79d618; font-style: italic } /* Comment */ -.dark .codehilite .err { color: #FF5370 } /* Error */ -.dark .codehilite .esc { color: #89DDFF } /* Escape */ -.dark .codehilite .g { color: #EEFFFF } /* Generic */ -.dark .codehilite .k { color: #BB80B3 } /* Keyword */ -.dark .codehilite .l { color: #C3E88D } /* Literal */ -.dark .codehilite .n { color: #EEFFFF } /* Name */ -.dark .codehilite .o { color: #89DDFF } /* Operator */ -.dark .codehilite .p { color: #89DDFF } /* Punctuation */ -.dark .codehilite .ch { color: #79d618; font-style: italic } /* Comment.Hashbang */ -.dark .codehilite .cm { color: #79d618; font-style: italic } /* Comment.Multiline */ -.dark .codehilite .cp { color: #79d618; font-style: italic } /* Comment.Preproc */ -.dark .codehilite .cpf { color: #79d618; font-style: italic } /* Comment.PreprocFile */ -.dark .codehilite .c1 { color: #79d618; font-style: italic } /* Comment.Single */ -.dark .codehilite .cs { color: #79d618; font-style: italic } /* Comment.Special */ -.dark .codehilite .gd { color: #FF5370 } /* Generic.Deleted */ -.dark .codehilite .ge { color: #89DDFF } /* Generic.Emph */ -.dark .codehilite .gr { color: #FF5370 } /* Generic.Error */ -.dark .codehilite .gh { color: #C3E88D } /* Generic.Heading */ -.dark .codehilite .gi { color: #C3E88D } /* 
Generic.Inserted */ -.dark .codehilite .go { color: #79d618 } /* Generic.Output */ -.dark .codehilite .gp { color: #FFCB6B } /* Generic.Prompt */ -.dark .codehilite .gs { color: #FF5370 } /* Generic.Strong */ -.dark .codehilite .gu { color: #89DDFF } /* Generic.Subheading */ -.dark .codehilite .gt { color: #FF5370 } /* Generic.Traceback */ -.dark .codehilite .kc { color: #89DDFF } /* Keyword.Constant */ -.dark .codehilite .kd { color: #BB80B3 } /* Keyword.Declaration */ -.dark .codehilite .kn { color: #89DDFF; font-style: italic } /* Keyword.Namespace */ -.dark .codehilite .kp { color: #89DDFF } /* Keyword.Pseudo */ -.dark .codehilite .kr { color: #BB80B3 } /* Keyword.Reserved */ -.dark .codehilite .kt { color: #BB80B3 } /* Keyword.Type */ -.dark .codehilite .ld { color: #C3E88D } /* Literal.Date */ -.dark .codehilite .m { color: #F78C6C } /* Literal.Number */ -.dark .codehilite .s { color: #C3E88D } /* Literal.String */ -.dark .codehilite .na { color: #BB80B3 } /* Name.Attribute */ -.dark .codehilite .nb { color: #82AAFF } /* Name.Builtin */ -.dark .codehilite .nc { color: #FFCB6B } /* Name.Class */ -.dark .codehilite .no { color: #EEFFFF } /* Name.Constant */ -.dark .codehilite .nd { color: #82AAFF } /* Name.Decorator */ -.dark .codehilite .ni { color: #89DDFF } /* Name.Entity */ -.dark .codehilite .ne { color: #FFCB6B } /* Name.Exception */ -.dark .codehilite .nf { color: #82AAFF } /* Name.Function */ -.dark .codehilite .nl { color: #82AAFF } /* Name.Label */ -.dark .codehilite .nn { color: #FFCB6B } /* Name.Namespace */ -.dark .codehilite .nx { color: #EEFFFF } /* Name.Other */ -.dark .codehilite .py { color: #FFCB6B } /* Name.Property */ -.dark .codehilite .nt { color: #FF5370 } /* Name.Tag */ -.dark .codehilite .nv { color: #89DDFF } /* Name.Variable */ -.dark .codehilite .ow { color: #89DDFF; font-style: italic } /* Operator.Word */ -.dark .codehilite .pm { color: #89DDFF } /* Punctuation.Marker */ -.dark .codehilite .w { color: #EEFFFF } /* Text.Whitespace */ -.dark .codehilite .mb { color: #F78C6C } /* Literal.Number.Bin */ -.dark .codehilite .mf { color: #F78C6C } /* Literal.Number.Float */ -.dark .codehilite .mh { color: #F78C6C } /* Literal.Number.Hex */ -.dark .codehilite .mi { color: #F78C6C } /* Literal.Number.Integer */ -.dark .codehilite .mo { color: #F78C6C } /* Literal.Number.Oct */ -.dark .codehilite .sa { color: #BB80B3 } /* Literal.String.Affix */ -.dark .codehilite .sb { color: #C3E88D } /* Literal.String.Backtick */ -.dark .codehilite .sc { color: #C3E88D } /* Literal.String.Char */ -.dark .codehilite .dl { color: #EEFFFF } /* Literal.String.Delimiter */ -.dark .codehilite .sd { color: #79d618; font-style: italic } /* Literal.String.Doc */ -.dark .codehilite .s2 { color: #C3E88D } /* Literal.String.Double */ -.dark .codehilite .se { color: #EEFFFF } /* Literal.String.Escape */ -.dark .codehilite .sh { color: #C3E88D } /* Literal.String.Heredoc */ -.dark .codehilite .si { color: #89DDFF } /* Literal.String.Interpol */ -.dark .codehilite .sx { color: #C3E88D } /* Literal.String.Other */ -.dark .codehilite .sr { color: #89DDFF } /* Literal.String.Regex */ -.dark .codehilite .s1 { color: #C3E88D } /* Literal.String.Single */ -.dark .codehilite .ss { color: #89DDFF } /* Literal.String.Symbol */ -.dark .codehilite .bp { color: #89DDFF } /* Name.Builtin.Pseudo */ -.dark .codehilite .fm { color: #82AAFF } /* Name.Function.Magic */ -.dark .codehilite .vc { color: #89DDFF } /* Name.Variable.Class */ -.dark .codehilite .vg { color: #89DDFF } /* Name.Variable.Global */ -.dark 
.codehilite .vi { color: #89DDFF } /* Name.Variable.Instance */ -.dark .codehilite .vm { color: #82AAFF } /* Name.Variable.Magic */ -.dark .codehilite .il { color: #F78C6C } /* Literal.Number.Integer.Long */ diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Audirvana Plus 3.2.15 Crack Mac Osx.md b/spaces/quidiaMuxgu/Expedit-SAM/Audirvana Plus 3.2.15 Crack Mac Osx.md deleted file mode 100644 index 868ef393274d1e71ec76ead1957e31fd95ab9cd9..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Audirvana Plus 3.2.15 Crack Mac Osx.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Audirvana Plus 3.2.15 Crack Mac Osx


        DOWNLOAD > https://geags.com/2uCsp9



        - -audirvana vs roon, HEOS (10 Similar Apps & 21647 Reviews) vs HK Alexa ... Audirvana Plus 3.2.15 Crack Mac Osx audirvana plus catalina, audirvana plus ... 1fdad05405
        -
        -
        -

        diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Lotr Return Of The King Cd Crack.md b/spaces/quidiaMuxgu/Expedit-SAM/Lotr Return Of The King Cd Crack.md deleted file mode 100644 index c8d0e8330988e66da38429bfc894b3db08166642..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Lotr Return Of The King Cd Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

        lotr return of the king cd crack


        Download File 🆗 https://geags.com/2uCpVG



        -
        -Lotr Return Of The King Cd Crack DOWNLOAD: http://geags.com/1fjml7 lotr return king extended edition length, lotr return king, lotr return king cost, cast lotr ... 1fdad05405
        -
        -
        -

        diff --git a/spaces/r3gm/SoniTranslate_translate_audio_of_a_video_content/vc_infer_pipeline.py b/spaces/r3gm/SoniTranslate_translate_audio_of_a_video_content/vc_infer_pipeline.py deleted file mode 100644 index e4a99dabdddf2e45fd13464a40d6f32c26221214..0000000000000000000000000000000000000000 --- a/spaces/r3gm/SoniTranslate_translate_audio_of_a_video_content/vc_infer_pipeline.py +++ /dev/null @@ -1,445 +0,0 @@ -import numpy as np, parselmouth, torch, pdb, sys, os -from time import time as ttime -import torch.nn.functional as F -import scipy.signal as signal -import pyworld, os, traceback, faiss, librosa, torchcrepe -from scipy import signal -from functools import lru_cache - -now_dir = os.getcwd() -sys.path.append(now_dir) - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - -input_audio_path2wav = {} - - -@lru_cache -def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period): - audio = input_audio_path2wav[input_audio_path] - f0, t = pyworld.harvest( - audio, - fs=fs, - f0_ceil=f0max, - f0_floor=f0min, - frame_period=frame_period, - ) - f0 = pyworld.stonemask(audio, f0, t, fs) - return f0 - - -def change_rms(data1, sr1, data2, sr2, rate): # 1 is the input audio, 2 is the output audio, rate is the proportion of 2 - # print(data1.max(),data2.max()) - rms1 = librosa.feature.rms( - y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2 - ) # one dot every half second - rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2) - rms1 = torch.from_numpy(rms1) - rms1 = F.interpolate( - rms1.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.from_numpy(rms2) - rms2 = F.interpolate( - rms2.unsqueeze(0), size=data2.shape[0], mode="linear" - ).squeeze() - rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6) - data2 *= ( - torch.pow(rms1, torch.tensor(1 - rate)) - * torch.pow(rms2, torch.tensor(rate - 1)) - ).numpy() - return data2 - - -class VC(object): - def __init__(self, tgt_sr, config): - self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = ( - config.x_pad, - config.x_query, - config.x_center, - config.x_max, - config.is_half, - ) - self.sr = 16000 # hubert input sampling rate - self.window = 160 # points per frame - self.t_pad = self.sr * self.x_pad # Pad time before and after each bar - self.t_pad_tgt = tgt_sr * self.x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * self.x_query # Query time before and after the cut point - self.t_center = self.sr * self.x_center # Query point cut position - self.t_max = self.sr * self.x_max # Query-free duration threshold - self.device = config.device - - def get_f0( - self, - input_audio_path, - x, - p_len, - f0_up_key, - f0_method, - filter_radius, - inp_f0=None, - ): - global input_audio_path2wav - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - input_audio_path2wav[input_audio_path] = x.astype(np.double) - f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10) - if filter_radius > 2: - f0 = 
signal.medfilt(f0, 3) - elif f0_method == "crepe": - model = "full" - # Pick a batch size that doesn't cause memory errors on your gpu - batch_size = 512 - # Compute pitch using first gpu - audio = torch.tensor(np.copy(x))[None].float() - f0, pd = torchcrepe.predict( - audio, - self.sr, - self.window, - f0_min, - f0_max, - model, - batch_size=batch_size, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - elif f0_method == "rmvpe": - if hasattr(self, "model_rmvpe") == False: - from lib.rmvpe import RMVPE - - print("loading rmvpe model") - self.model_rmvpe = RMVPE( - "rmvpe.pt", is_half=self.is_half, device=self.device - ) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # f0 points per second - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0] - f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[ - :shape - ] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(int) # change np.int - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9 if version == "v1" else 12, - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) if version == "v1" else logits[0] - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = feats.clone() - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - - # _, I = index.search(npy, 1) - # npy = big_npy[I.squeeze()] - - score, ix = index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - if protect < 0.5 and pitch != None and pitchf != None: - feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute( - 0, 2, 1 - ) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = 
feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - - if protect < 0.5 and pitch != None and pitchf != None: - pitchff = pitchf.clone() - pitchff[pitchf > 0] = 1 - pitchff[pitchf < 1] = protect - pitchff = pitchff.unsqueeze(-1) - feats = feats * pitchff + feats0 * (1 - pitchff) - feats = feats.to(feats0.dtype) - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]) - .data.cpu() - .float() - .numpy() - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy() - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - input_audio_path, - times, - f0_up_key, - f0_method, - file_index, - # file_big_npy, - index_rate, - if_f0, - filter_radius, - tgt_sr, - resample_sr, - rms_mix_rate, - version, - protect, - f0_file=None, - ): - if ( - file_index != "" - # and file_big_npy != "" - # and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - # big_npy = np.load(file_big_npy) - big_npy = index.reconstruct_n(0, index.ntotal) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - print("File index Not found, set None") - - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0( - input_audio_path, - audio_pad, - p_len, - f0_up_key, - f0_method, - filter_radius, - inp_f0, - ) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - if self.device == "mps": - pitchf = pitchf.astype(np.float32) - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - 
audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - version, - protect, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - if rms_mix_rate != 1: - audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate) - if resample_sr >= 16000 and tgt_sr != resample_sr: - audio_opt = librosa.resample( - audio_opt, orig_sr=tgt_sr, target_sr=resample_sr - ) - audio_max = np.abs(audio_opt).max() / 0.99 - max_int16 = 32768 - if audio_max > 1: - max_int16 /= audio_max - audio_opt = (audio_opt * max_int16).astype(np.int16) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/radames/NYTimes-homepage-rearranged/static/_app/assets/pages/__layout.svelte-298a4fd5.css b/spaces/radames/NYTimes-homepage-rearranged/static/_app/assets/pages/__layout.svelte-298a4fd5.css deleted file mode 100644 index dab39932985b435820425a1ee5ebefccbf822576..0000000000000000000000000000000000000000 --- a/spaces/radames/NYTimes-homepage-rearranged/static/_app/assets/pages/__layout.svelte-298a4fd5.css +++ /dev/null @@ -1 +0,0 @@ -*,:before,:after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:before,:after{--tw-content: ""}html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,"Apple Color Emoji","Segoe UI Emoji",Segoe UI Symbol,"Noto Color Emoji"}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,samp,pre{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier 
New,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;line-height:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}button,[type=button],[type=reset],[type=submit]{-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dl,dd,h1,h2,h3,h4,h5,h6,hr,figure,p,pre{margin:0}fieldset{margin:0;padding:0}legend{padding:0}ol,ul,menu{list-style:none;margin:0;padding:0}textarea{resize:vertical}input::-moz-placeholder,textarea::-moz-placeholder{opacity:1;color:#9ca3af}input:-ms-input-placeholder,textarea:-ms-input-placeholder{opacity:1;color:#9ca3af}input::placeholder,textarea::placeholder{opacity:1;color:#9ca3af}button,[role=button]{cursor:pointer}:disabled{cursor:default}img,svg,video,canvas,audio,iframe,embed,object{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]{display:none}*,:before,:after{--tw-translate-x: 0;--tw-translate-y: 0;--tw-rotate: 0;--tw-skew-x: 0;--tw-skew-y: 0;--tw-scale-x: 1;--tw-scale-y: 1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness: proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width: 0px;--tw-ring-offset-color: #fff;--tw-ring-color: rgb(59 130 246 / .5);--tw-ring-offset-shadow: 0 0 #0000;--tw-ring-shadow: 0 0 #0000;--tw-shadow: 0 0 #0000;--tw-shadow-colored: 0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }.prose{color:var(--tw-prose-body);max-width:65ch}.prose :where([class~="lead"]):not(:where([class~="not-prose"] *)){color:var(--tw-prose-lead);font-size:1.25em;line-height:1.6;margin-top:1.2em;margin-bottom:1.2em}.prose :where(a):not(:where([class~="not-prose"] *)){color:var(--tw-prose-links);text-decoration:underline;font-weight:500}.prose :where(strong):not(:where([class~="not-prose"] *)){color:var(--tw-prose-bold);font-weight:600}.prose :where(ol):not(:where([class~="not-prose"] *)){list-style-type:decimal;padding-left:1.625em}.prose :where(ol[type="A"]):not(:where([class~="not-prose"] *)){list-style-type:upper-alpha}.prose :where(ol[type="a"]):not(:where([class~="not-prose"] *)){list-style-type:lower-alpha}.prose :where(ol[type="A" s]):not(:where([class~="not-prose"] *)){list-style-type:upper-alpha}.prose :where(ol[type="a" s]):not(:where([class~="not-prose"] *)){list-style-type:lower-alpha}.prose :where(ol[type="I"]):not(:where([class~="not-prose"] *)){list-style-type:upper-roman}.prose :where(ol[type="i"]):not(:where([class~="not-prose"] *)){list-style-type:lower-roman}.prose :where(ol[type="I" s]):not(:where([class~="not-prose"] 
*)){list-style-type:upper-roman}.prose :where(ol[type="i" s]):not(:where([class~="not-prose"] *)){list-style-type:lower-roman}.prose :where(ol[type="1"]):not(:where([class~="not-prose"] *)){list-style-type:decimal}.prose :where(ul):not(:where([class~="not-prose"] *)){list-style-type:disc;padding-left:1.625em}.prose :where(ol > li):not(:where([class~="not-prose"] *))::marker{font-weight:400;color:var(--tw-prose-counters)}.prose :where(ul > li):not(:where([class~="not-prose"] *))::marker{color:var(--tw-prose-bullets)}.prose :where(hr):not(:where([class~="not-prose"] *)){border-color:var(--tw-prose-hr);border-top-width:1px;margin-top:3em;margin-bottom:3em}.prose :where(blockquote):not(:where([class~="not-prose"] *)){font-weight:500;font-style:italic;color:var(--tw-prose-quotes);border-left-width:.25rem;border-left-color:var(--tw-prose-quote-borders);quotes:"\201c""\201d""\2018""\2019";margin-top:1.6em;margin-bottom:1.6em;padding-left:1em}.prose :where(blockquote p:first-of-type):not(:where([class~="not-prose"] *)):before{content:open-quote}.prose :where(blockquote p:last-of-type):not(:where([class~="not-prose"] *)):after{content:close-quote}.prose :where(h1):not(:where([class~="not-prose"] *)){color:var(--tw-prose-headings);font-weight:800;font-size:2.25em;margin-top:0;margin-bottom:.8888889em;line-height:1.1111111}.prose :where(h1 strong):not(:where([class~="not-prose"] *)){font-weight:900}.prose :where(h2):not(:where([class~="not-prose"] *)){color:var(--tw-prose-headings);font-weight:700;font-size:1.5em;margin-top:2em;margin-bottom:1em;line-height:1.3333333}.prose :where(h2 strong):not(:where([class~="not-prose"] *)){font-weight:800}.prose :where(h3):not(:where([class~="not-prose"] *)){color:var(--tw-prose-headings);font-weight:600;font-size:1.25em;margin-top:1.6em;margin-bottom:.6em;line-height:1.6}.prose :where(h3 strong):not(:where([class~="not-prose"] *)){font-weight:700}.prose :where(h4):not(:where([class~="not-prose"] *)){color:var(--tw-prose-headings);font-weight:600;margin-top:1.5em;margin-bottom:.5em;line-height:1.5}.prose :where(h4 strong):not(:where([class~="not-prose"] *)){font-weight:700}.prose :where(figure > *):not(:where([class~="not-prose"] *)){margin-top:0;margin-bottom:0}.prose :where(figcaption):not(:where([class~="not-prose"] *)){color:var(--tw-prose-captions);font-size:.875em;line-height:1.4285714;margin-top:.8571429em}.prose :where(code):not(:where([class~="not-prose"] *)){color:var(--tw-prose-code);font-weight:600;font-size:.875em}.prose :where(code):not(:where([class~="not-prose"] *)):before{content:""}.prose :where(code):not(:where([class~="not-prose"] *)):after{content:""}.prose :where(a code):not(:where([class~="not-prose"] *)){color:var(--tw-prose-links)}.prose :where(pre):not(:where([class~="not-prose"] *)){color:var(--tw-prose-pre-code);background-color:var(--tw-prose-pre-bg);overflow-x:auto;font-weight:400;font-size:.875em;line-height:1.7142857;margin-top:1.7142857em;margin-bottom:1.7142857em;border-radius:.375rem;padding:.8571429em 1.1428571em}.prose :where(pre code):not(:where([class~="not-prose"] *)){background-color:transparent;border-width:0;border-radius:0;padding:0;font-weight:inherit;color:inherit;font-size:inherit;font-family:inherit;line-height:inherit}.prose :where(pre code):not(:where([class~="not-prose"] *)):before{content:none}.prose :where(pre code):not(:where([class~="not-prose"] *)):after{content:none}.prose :where(table):not(:where([class~="not-prose"] 
*)){width:100%;table-layout:auto;text-align:left;margin-top:2em;margin-bottom:2em;font-size:.875em;line-height:1.7142857}.prose :where(thead):not(:where([class~="not-prose"] *)){border-bottom-width:1px;border-bottom-color:var(--tw-prose-th-borders)}.prose :where(thead th):not(:where([class~="not-prose"] *)){color:var(--tw-prose-headings);font-weight:600;vertical-align:bottom;padding-right:.5714286em;padding-bottom:.5714286em;padding-left:.5714286em}.prose :where(tbody tr):not(:where([class~="not-prose"] *)){border-bottom-width:1px;border-bottom-color:var(--tw-prose-td-borders)}.prose :where(tbody tr:last-child):not(:where([class~="not-prose"] *)){border-bottom-width:0}.prose :where(tbody td):not(:where([class~="not-prose"] *)){vertical-align:baseline;padding:.5714286em}.prose{--tw-prose-body: #374151;--tw-prose-headings: #111827;--tw-prose-lead: #4b5563;--tw-prose-links: #111827;--tw-prose-bold: #111827;--tw-prose-counters: #6b7280;--tw-prose-bullets: #d1d5db;--tw-prose-hr: #e5e7eb;--tw-prose-quotes: #111827;--tw-prose-quote-borders: #e5e7eb;--tw-prose-captions: #6b7280;--tw-prose-code: #111827;--tw-prose-pre-code: #e5e7eb;--tw-prose-pre-bg: #1f2937;--tw-prose-th-borders: #d1d5db;--tw-prose-td-borders: #e5e7eb;--tw-prose-invert-body: #d1d5db;--tw-prose-invert-headings: #fff;--tw-prose-invert-lead: #9ca3af;--tw-prose-invert-links: #fff;--tw-prose-invert-bold: #fff;--tw-prose-invert-counters: #9ca3af;--tw-prose-invert-bullets: #4b5563;--tw-prose-invert-hr: #374151;--tw-prose-invert-quotes: #f3f4f6;--tw-prose-invert-quote-borders: #374151;--tw-prose-invert-captions: #9ca3af;--tw-prose-invert-code: #fff;--tw-prose-invert-pre-code: #d1d5db;--tw-prose-invert-pre-bg: rgb(0 0 0 / 50%);--tw-prose-invert-th-borders: #4b5563;--tw-prose-invert-td-borders: #374151;font-size:1rem;line-height:1.75}.prose :where(p):not(:where([class~="not-prose"] *)){margin-top:1.25em;margin-bottom:1.25em}.prose :where(img):not(:where([class~="not-prose"] *)){margin-top:2em;margin-bottom:2em}.prose :where(video):not(:where([class~="not-prose"] *)){margin-top:2em;margin-bottom:2em}.prose :where(figure):not(:where([class~="not-prose"] *)){margin-top:2em;margin-bottom:2em}.prose :where(h2 code):not(:where([class~="not-prose"] *)){font-size:.875em}.prose :where(h3 code):not(:where([class~="not-prose"] *)){font-size:.9em}.prose :where(li):not(:where([class~="not-prose"] *)){margin-top:.5em;margin-bottom:.5em}.prose :where(ol > li):not(:where([class~="not-prose"] *)){padding-left:.375em}.prose :where(ul > li):not(:where([class~="not-prose"] *)){padding-left:.375em}.prose>:where(ul > li p):not(:where([class~="not-prose"] *)){margin-top:.75em;margin-bottom:.75em}.prose>:where(ul > li > *:first-child):not(:where([class~="not-prose"] *)){margin-top:1.25em}.prose>:where(ul > li > *:last-child):not(:where([class~="not-prose"] *)){margin-bottom:1.25em}.prose>:where(ol > li > *:first-child):not(:where([class~="not-prose"] *)){margin-top:1.25em}.prose>:where(ol > li > *:last-child):not(:where([class~="not-prose"] *)){margin-bottom:1.25em}.prose :where(ul ul,ul ol,ol ul,ol ol):not(:where([class~="not-prose"] *)){margin-top:.75em;margin-bottom:.75em}.prose :where(hr + *):not(:where([class~="not-prose"] *)){margin-top:0}.prose :where(h2 + *):not(:where([class~="not-prose"] *)){margin-top:0}.prose :where(h3 + *):not(:where([class~="not-prose"] *)){margin-top:0}.prose :where(h4 + *):not(:where([class~="not-prose"] *)){margin-top:0}.prose :where(thead th:first-child):not(:where([class~="not-prose"] *)){padding-left:0}.prose :where(thead 
th:last-child):not(:where([class~="not-prose"] *)){padding-right:0}.prose :where(tbody td:first-child):not(:where([class~="not-prose"] *)){padding-left:0}.prose :where(tbody td:last-child):not(:where([class~="not-prose"] *)){padding-right:0}.prose>:where(:first-child):not(:where([class~="not-prose"] *)){margin-top:0}.prose>:where(:last-child):not(:where([class~="not-prose"] *)){margin-bottom:0}.prose-gray{--tw-prose-body: #374151;--tw-prose-headings: #111827;--tw-prose-lead: #4b5563;--tw-prose-links: #111827;--tw-prose-bold: #111827;--tw-prose-counters: #6b7280;--tw-prose-bullets: #d1d5db;--tw-prose-hr: #e5e7eb;--tw-prose-quotes: #111827;--tw-prose-quote-borders: #e5e7eb;--tw-prose-captions: #6b7280;--tw-prose-code: #111827;--tw-prose-pre-code: #e5e7eb;--tw-prose-pre-bg: #1f2937;--tw-prose-th-borders: #d1d5db;--tw-prose-td-borders: #e5e7eb;--tw-prose-invert-body: #d1d5db;--tw-prose-invert-headings: #fff;--tw-prose-invert-lead: #9ca3af;--tw-prose-invert-links: #fff;--tw-prose-invert-bold: #fff;--tw-prose-invert-counters: #9ca3af;--tw-prose-invert-bullets: #4b5563;--tw-prose-invert-hr: #374151;--tw-prose-invert-quotes: #f3f4f6;--tw-prose-invert-quote-borders: #374151;--tw-prose-invert-captions: #9ca3af;--tw-prose-invert-code: #fff;--tw-prose-invert-pre-code: #d1d5db;--tw-prose-invert-pre-bg: rgb(0 0 0 / 50%);--tw-prose-invert-th-borders: #4b5563;--tw-prose-invert-td-borders: #374151}.invisible{visibility:hidden}.relative{position:relative}.col-span-2{grid-column:span 2 / span 2}.m-0{margin:0}.mx-auto{margin-left:auto;margin-right:auto}.mt-1{margin-top:.25rem}.mb-0{margin-bottom:0}.mt-0{margin-top:0}.inline-block{display:inline-block}.grid{display:grid}.aspect-\[4\/3\]{aspect-ratio:4 / 3}.h-full{height:100%}.w-full{width:100%}.max-w-\[15rem\]{max-width:15rem}.max-w-prose{max-width:65ch}.max-w-4xl{max-width:56rem}@-webkit-keyframes spin{to{transform:rotate(360deg)}}@keyframes spin{to{transform:rotate(360deg)}}.animate-spin{-webkit-animation:spin 1s linear infinite;animation:spin 1s linear infinite}.cursor-pointer{cursor:pointer}.grid-cols-1{grid-template-columns:repeat(1,minmax(0,1fr))}.rounded{border-radius:.25rem}.border{border-width:1px}.border-b{border-bottom-width:1px}.border-gray-200{--tw-border-opacity: 1;border-color:rgb(229 231 235 / var(--tw-border-opacity))}.bg-gray-200{--tw-bg-opacity: 1;background-color:rgb(229 231 235 / var(--tw-bg-opacity))}.bg-emerald-600{--tw-bg-opacity: 1;background-color:rgb(5 150 105 / var(--tw-bg-opacity))}.bg-red-600{--tw-bg-opacity: 1;background-color:rgb(220 38 38 / var(--tw-bg-opacity))}.object-cover{-o-object-fit:cover;object-fit:cover}.object-top{-o-object-position:top;object-position:top}.p-0{padding:0}.px-6{padding-left:1.5rem;padding-right:1.5rem}.py-3{padding-top:.75rem;padding-bottom:.75rem}.px-0{padding-left:0;padding-right:0}.py-0{padding-top:0;padding-bottom:0}.py-4{padding-top:1rem;padding-bottom:1rem}.py-2{padding-top:.5rem;padding-bottom:.5rem}.px-4{padding-left:1rem;padding-right:1rem}.py-5{padding-top:1.25rem;padding-bottom:1.25rem}.font-serif{font-family:ui-serif,Georgia,Cambria,Times New Roman,Times,serif}.text-sm{font-size:.875rem;line-height:1.25rem}.text-xs{font-size:.75rem;line-height:1rem}.font-bold{font-weight:700}.leading-tight{line-height:1.25}.leading-normal{line-height:1.5}.text-emerald-600{--tw-text-opacity: 1;color:rgb(5 150 105 / var(--tw-text-opacity))}.text-red-600{--tw-text-opacity: 1;color:rgb(220 38 38 / var(--tw-text-opacity))}.text-blue-500{--tw-text-opacity: 1;color:rgb(59 130 246 / 
var(--tw-text-opacity))}.text-gray-700{--tw-text-opacity: 1;color:rgb(55 65 81 / var(--tw-text-opacity))}.text-white{--tw-text-opacity: 1;color:rgb(255 255 255 / var(--tw-text-opacity))}.underline{-webkit-text-decoration-line:underline;text-decoration-line:underline}.no-underline{-webkit-text-decoration-line:none;text-decoration-line:none}.hover\:bg-zinc-300:hover{--tw-bg-opacity: 1;background-color:rgb(212 212 216 / var(--tw-bg-opacity))}.hover\:no-underline:hover{-webkit-text-decoration-line:none;text-decoration-line:none}.hover\:opacity-60:hover{opacity:.6}.hover\:opacity-50:hover{opacity:.5}.focus\:border-gray-500:focus{--tw-border-opacity: 1;border-color:rgb(107 114 128 / var(--tw-border-opacity))}.focus\:bg-white:focus{--tw-bg-opacity: 1;background-color:rgb(255 255 255 / var(--tw-bg-opacity))}.focus\:outline-none:focus{outline:2px solid transparent;outline-offset:2px}@media (min-width: 640px){.sm\:grid-cols-3{grid-template-columns:repeat(3,minmax(0,1fr))}.sm\:pl-4{padding-left:1rem}}@media (min-width: 768px){.md\:max-w-md{max-width:28rem}} diff --git a/spaces/radwulf101/ChatGPT4/README.md b/spaces/radwulf101/ChatGPT4/README.md deleted file mode 100644 index 7938de14e5355209aaae713f289ca469181bbb17..0000000000000000000000000000000000000000 --- a/spaces/radwulf101/ChatGPT4/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Chat-with-GPT4 -emoji: 🚀 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: ysharma/ChatGPT4 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Abc Preschool Sight Word Jigsaw Puzzle Shapes For Mac.md b/spaces/raedeXanto/academic-chatgpt-beta/Abc Preschool Sight Word Jigsaw Puzzle Shapes For Mac.md deleted file mode 100644 index 93b99efb89d1dd9c83855dac78af5e69fb7d7770..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Abc Preschool Sight Word Jigsaw Puzzle Shapes For Mac.md +++ /dev/null @@ -1,14 +0,0 @@ -
        -

        ABC Preschool Sight Word Jigsaw Puzzle Shapes: A Fun and Educational App for Mac Users

        -

        If you are looking for a way to help your toddlers and preschool children learn the English alphabet and over 100 easy words, you might want to check out ABC Preschool Sight Word Jigsaw Puzzle Shapes. This app is available for macOS 10.7 or later and it offers a fun and interactive puzzle themed environment that will keep your kids engaged and entertained.

        -

        The app features 26 colorful puzzles, one for each letter of the alphabet. Each puzzle has four pieces that represent a word that starts with that letter. For example, the puzzle for letter A has pieces for apple, ant, airplane and alligator. The app also has a voice-over that pronounces each word and letter as the child drags and drops the pieces into place. This way, the child can learn how to spell and say the words while having fun.

        -

        Abc Preschool Sight Word Jigsaw Puzzle Shapes For Mac


        Download Filehttps://tinourl.com/2uL2xf



        -

        ABC Preschool Sight Word Jigsaw Puzzle Shapes is not only a great app for learning the alphabet and vocabulary, but also for developing fine motor skills, hand-eye coordination, shape recognition and cognitive skills. The app has a simple and intuitive interface that is easy to use for young children. The app also has no ads, no in-app purchases and no links to external websites, so you can let your kids play safely and without distractions.

        -

If you are interested in downloading ABC Preschool Sight Word Jigsaw Puzzle Shapes for your Mac, you can find it on the App Store. The app costs $2.99 and it has a 4.5-star rating from 11 reviews. You can also watch a video preview of the app on YouTube. ABC Preschool Sight Word Jigsaw Puzzle Shapes is a wonderful app that will help your kids learn while having fun.

        - -

        ABC Preschool Sight Word Jigsaw Puzzle Shapes is not only a fun and educational app for kids, but also a useful tool for parents and teachers. The app allows you to track your child's progress and achievements through a report card feature. You can see how many puzzles your child has completed, how many words they have learned and how much time they have spent on the app. You can also customize the app settings to suit your child's needs and preferences. You can choose the difficulty level of the puzzles, the voice-over language and the background music.

        -

The app is designed by GrasshopperApps.com, a company that specializes in creating high-quality educational apps for kids. The company has over 100 apps on the App Store, covering various topics such as math, reading, spelling, phonics, geography and more. The company's mission is to provide engaging and effective learning experiences for children of all ages and abilities. The company also values feedback from users and strives to improve its apps based on user suggestions and reviews.

        -

        If you are looking for a fun way to introduce your kids to the alphabet and sight words, you should definitely give ABC Preschool Sight Word Jigsaw Puzzle Shapes a try. The app is suitable for kids aged 2 to 6 years old and it works on any Mac device that runs macOS 10.7 or later. The app is a one-time purchase that gives you access to all the features and content without any hidden costs or subscriptions. You can download the app today and start enjoying the benefits of learning through play.

        -

        -
        -
        \ No newline at end of file diff --git a/spaces/raomaya/COVID_travel_dashboard/README.md b/spaces/raomaya/COVID_travel_dashboard/README.md deleted file mode 100644 index 19b96db495fe5823b463c647f631eb384823e72f..0000000000000000000000000000000000000000 --- a/spaces/raomaya/COVID_travel_dashboard/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: COVID Travel Dashboard -emoji: 🐢 -colorFrom: purple -colorTo: purple -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/rayan-saleh/whisper2notion/server/node_modules/cors/HISTORY.md b/spaces/rayan-saleh/whisper2notion/server/node_modules/cors/HISTORY.md deleted file mode 100644 index 5762bce92212a44c4ceaab3cff5eded5efc72874..0000000000000000000000000000000000000000 --- a/spaces/rayan-saleh/whisper2notion/server/node_modules/cors/HISTORY.md +++ /dev/null @@ -1,58 +0,0 @@ -2.8.5 / 2018-11-04 -================== - - * Fix setting `maxAge` option to `0` - -2.8.4 / 2017-07-12 -================== - - * Work-around Safari bug in default pre-flight response - -2.8.3 / 2017-03-29 -================== - - * Fix error when options delegate missing `methods` option - -2.8.2 / 2017-03-28 -================== - - * Fix error when frozen options are passed - * Send "Vary: Origin" when using regular expressions - * Send "Vary: Access-Control-Request-Headers" when dynamic `allowedHeaders` - -2.8.1 / 2016-09-08 -================== - -This release only changed documentation. - -2.8.0 / 2016-08-23 -================== - - * Add `optionsSuccessStatus` option - -2.7.2 / 2016-08-23 -================== - - * Fix error when Node.js running in strict mode - -2.7.1 / 2015-05-28 -================== - - * Move module into expressjs organization - -2.7.0 / 2015-05-28 -================== - - * Allow array of matching condition as `origin` option - * Allow regular expression as `origin` option - -2.6.1 / 2015-05-28 -================== - - * Update `license` in package.json - -2.6.0 / 2015-04-27 -================== - - * Add `preflightContinue` option - * Fix "Vary: Origin" header added for "*" diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Boologam Tamil Movie Download ((FREE)) Tamilrockers Torrent.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Boologam Tamil Movie Download ((FREE)) Tamilrockers Torrent.md deleted file mode 100644 index 12577c4534aa67eaac15502491e38ac862e7944d..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Boologam Tamil Movie Download ((FREE)) Tamilrockers Torrent.md +++ /dev/null @@ -1,88 +0,0 @@ - -

        Boologam Tamil Movie Download Tamilrockers Torrent: A Complete Guide

        - -

        If you are a fan of Tamil movies, you might have heard of Boologam, a 2015 action film starring Jayam Ravi and Trisha Krishnan. The movie revolves around a boxer who fights against a corrupt businessman who wants to take over his land. Boologam was praised for its thrilling action sequences, powerful performances, and social message.

        -

        Boologam Tamil Movie Download Tamilrockers Torrent


        Download File ⇒⇒⇒ https://urlgoal.com/2uCLvO



        - -

        But how can you watch Boologam Tamil movie online or offline? One of the best ways to do so is by using Tamilrockers torrent site. Tamilrockers is one of the most popular and notorious torrent sites in India, offering a huge collection of movies, TV shows, web series, and music in various languages, including Tamil, Hindi, Telugu, Malayalam, and more.

        - -

        In this article, we will show you how to download Boologam Tamil movie using Tamilrockers torrent site. We will also provide you with some tips and precautions to ensure a safe and smooth downloading experience. Read on to find out more.

        - -

        How to Download Boologam Tamil Movie Using Tamilrockers Torrent Site

        - -

        Downloading Boologam Tamil movie using Tamilrockers torrent site is not very difficult, but you need to follow some steps carefully. Here are the steps you need to take:

        - -
          -
        1. First of all, you need to have a torrent client installed on your device. A torrent client is a software that allows you to download files from torrent sites. Some of the popular torrent clients are uTorrent, BitTorrent, qBittorrent, and Vuze.
        2. -
        3. Next, you need to find a working link to Tamilrockers torrent site. Tamilrockers is a banned site in India and many other countries due to its illegal content distribution. Therefore, it keeps changing its domain name and URL frequently to evade authorities. You can use a proxy site or a VPN service to access Tamilrockers safely and anonymously.
        4. -
        5. Once you access Tamilrockers torrent site, you need to search for Boologam Tamil movie using the search bar or browse through the categories. You will see a list of results with different file sizes and qualities. Choose the one that suits your preferences and click on it.
        6. -
        7. You will be redirected to another page where you will see a magnet link or a download button. Click on it and your torrent client will open automatically. You will see some details about the file, such as name, size, seeders, leechers, etc. You can also choose which files you want to download from the torrent.
        8. -
        9. Click on OK or Start and your download will begin. Depending on your internet speed and the number of seeders available, your download may take some time to complete. You can check the progress and status of your download on your torrent client.
        10. -
        11. Once your download is finished, you can open the file and enjoy watching Boologam Tamil movie on your device. You can also transfer the file to another device or burn it to a DVD if you want.
        12. -
        - -

        Tips and Precautions for Downloading Boologam Tamil Movie Using Tamilrockers Torrent Site

        - -

        While downloading Boologam Tamil movie using Tamilrockers torrent site may seem easy and convenient, it also comes with some risks and challenges. Here are some tips and precautions that you should keep in mind before downloading Boologam Tamil movie using Tamilrockers torrent site:

        - -
          -
        • Always use a VPN service or a proxy site to access Tamilrockers torrent site. This will help you avoid any legal troubles or cyberattacks that may arise from visiting a banned site. A VPN service or a proxy site will also mask your IP address and location, making you anonymous online.
        • -
        • Always scan the downloaded file for any viruses or malware before opening it. Some torrent files may contain harmful software that can damage your device or steal your personal information. You can use an antivirus program or an online scanner to check the file for any threats.
        • -
        • Always check the comments and ratings of the torrent file before downloading it. This will help you avoid any fake or corrupted files that may waste your time and bandwidth. You can also get some feedback from other users who have downloaded the same file.
        • -
        • Always seed the torrent file after downloading it. Seeding is the process of uploading the file to other users who are downloading it. This will help maintain the health and availability of the torrent file and also show your gratitude to the original uploader.
        • -
        • Always be aware of the legal implications of downloading Boologam Tamil movie using Tamilrockers torrent site. Downloading copyrighted content without permission is illegal in many countries and can result in fines or imprisonment. You should respect the rights of the creators and producers of Boologam Tamil movie and support them by watching it legally.
        • -
        - -

        Conclusion

        - -

        Boologam Tamil movie is an entertaining and inspiring film that showcases the spirit of boxing and fighting against injustice. If you want to watch Boologam Tamil movie online or offline, one of the best ways to do so is by using Tamilrockers torrent site.

        -

        - -

        However, you should also be careful and responsible when downloading Boologam Tamil movie using Tamilrockers torrent site. Follow the steps we have provided above and take the necessary precautions to ensure a safe and smooth downloading experience.

        - -

        We hope this article has helped you learn how to download Boologam Tamil movie using Tamilrockers torrent site. If you have any questions or suggestions, feel free to leave a comment below.

        -

        Alternative Ways to Watch Boologam Tamil Movie Online or Offline

        - -

        If you are not comfortable with downloading Boologam Tamil movie using Tamilrockers torrent site, or if you cannot find a working link to Tamilrockers torrent site, don't worry. There are some other ways to watch Boologam Tamil movie online or offline legally and safely. Here are some of them:

        - -
          -
        • Watch Boologam Tamil movie on streaming platforms. There are some online platforms that offer Boologam Tamil movie for streaming or rental. Some of them are Amazon Prime Video, Hotstar, Zee5, and YouTube. You can watch Boologam Tamil movie on these platforms by paying a small fee or subscribing to their service. You can also download Boologam Tamil movie on some of these platforms for offline viewing.
        • -
        • Watch Boologam Tamil movie on TV channels. There are some TV channels that broadcast Boologam Tamil movie occasionally. Some of them are Sun TV, Star Vijay, and Zee Tamil. You can watch Boologam Tamil movie on these channels by tuning in at the right time or recording it on your DVR.
        • -
        • Watch Boologam Tamil movie on DVD or Blu-ray. You can also buy or rent Boologam Tamil movie on DVD or Blu-ray from your local store or online. You can watch Boologam Tamil movie on your DVD or Blu-ray player or on your computer using a compatible software.
        • -
        - -

        These are some of the alternative ways to watch Boologam Tamil movie online or offline legally and safely. However, you should also keep in mind that these ways may not offer the same quality and convenience as downloading Boologam Tamil movie using Tamilrockers torrent site. You may also have to pay more money or wait longer to watch Boologam Tamil movie using these ways.

        - -

        Therefore, the choice is yours. You can either download Boologam Tamil movie using Tamilrockers torrent site or use one of the alternative ways to watch it online or offline. Whatever you choose, we hope you enjoy watching Boologam Tamil movie and appreciate its story and message.

        -

        Why You Should Watch Boologam Tamil Movie

        - -

        Boologam Tamil movie is not just another action film. It is a film that has a lot of substance and meaning behind its scenes. Here are some of the reasons why you should watch Boologam Tamil movie:

        - -
          -
        • Boologam Tamil movie is a tribute to the sport of boxing and its legends. The film is inspired by the life and career of Muhammad Ali, one of the greatest boxers of all time. The film also features cameo appearances by some of the famous boxers from India, such as Mike Tyson, Nathan Jones, and Prakash Raj.
        • -
        • Boologam Tamil movie is a commentary on the social issues and problems faced by the people of India. The film exposes the corruption and greed of the powerful and wealthy, who exploit the poor and helpless for their own gain. The film also highlights the importance of fighting for one's rights and dignity, and standing up against injustice and oppression.
        • -
        • Boologam Tamil movie is a showcase of the talent and skills of Jayam Ravi and Trisha Krishnan. The lead actors of the film have delivered stellar performances in their roles as Boologam and Sindhu, respectively. Jayam Ravi has undergone a physical transformation to play the role of a boxer, and has performed his own stunts and fight scenes. Trisha Krishnan has portrayed the role of a journalist and a love interest of Boologam, and has shown her versatility and charm.
        • -
        • Boologam Tamil movie is a feast for the eyes and ears. The film has been shot in various locations across India, such as Chennai, Mumbai, Delhi, Kolkata, and Hyderabad. The film also features some stunning visuals and cinematography, capturing the essence and beauty of each place. The film also has a catchy and melodious soundtrack, composed by Srikanth Deva, that complements the mood and tone of the film.
        • -
        - -

        These are some of the reasons why you should watch Boologam Tamil movie. It is a film that will entertain you, inspire you, and make you think. It is a film that will make you proud of being an Indian.

        -

        How to Watch Boologam Tamil Movie Legally and Safely

        - -

        As we have mentioned earlier, downloading Boologam Tamil movie using Tamilrockers torrent site is illegal and risky. You may face legal consequences or cyberattacks if you do so. Therefore, we recommend that you watch Boologam Tamil movie legally and safely. Here are some of the ways to do so:

        - -
          -
        • Buy or rent Boologam Tamil movie from official platforms. You can buy or rent Boologam Tamil movie from platforms such as Amazon Prime Video, Google Play Movies, iTunes, and YouTube. You can watch Boologam Tamil movie on these platforms by paying a reasonable price or subscribing to their service. You can also download Boologam Tamil movie on some of these platforms for offline viewing.
        • -
        • Watch Boologam Tamil movie on OTT platforms. You can also watch Boologam Tamil movie on OTT platforms such as Hotstar, Zee5, and Sun NXT. These platforms offer Boologam Tamil movie for streaming along with other movies, TV shows, web series, and live sports. You can watch Boologam Tamil movie on these platforms by subscribing to their service or using a free trial.
        • -
        • Watch Boologam Tamil movie on legal websites. You can also watch Boologam Tamil movie on legal websites such as Einthusan, Hungama, and Yupp TV. These websites offer Boologam Tamil movie for streaming or download along with other movies, TV shows, web series, and music in various languages. You can watch Boologam Tamil movie on these websites by registering for free or paying a nominal fee.
        • -
        - -

        These are some of the ways to watch Boologam Tamil movie legally and safely. By watching Boologam Tamil movie legally and safely, you can enjoy the film without any worries or guilt. You can also support the makers and actors of Boologam Tamil movie and encourage them to make more quality films in the future.

        -

        Conclusion

        - -

        Boologam Tamil movie is a must-watch film for anyone who loves action, drama, and social message. It is a film that celebrates the sport of boxing and its legends, exposes the corruption and greed of the powerful and wealthy, showcases the talent and skills of Jayam Ravi and Trisha Krishnan, and offers a feast for the eyes and ears.

        - -

        If you want to watch Boologam Tamil movie online or offline, you have two options. You can either download Boologam Tamil movie using Tamilrockers torrent site or use one of the alternative ways to watch it legally and safely. Both options have their own advantages and disadvantages, so you should choose wisely.

        - -

        We hope this article has helped you learn how to download Boologam Tamil movie using Tamilrockers torrent site and how to watch it legally and safely. If you have any questions or suggestions, feel free to leave a comment below. Thank you for reading and happy watching!

        3cee63e6c2
        -
        -
        \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Hamsterball Gold Full Version Arcade.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Hamsterball Gold Full Version Arcade.md deleted file mode 100644 index ba3b6b94bd842b6227c7a0c0b0a11890b9b7cf6a..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Download Hamsterball Gold Full Version Arcade.md +++ /dev/null @@ -1,12 +0,0 @@ -

        Download Hamsterball Gold Full Version Arcade


        Download Zip ===> https://urlgoal.com/2uCKkn



        -
        -December 10, 2019 - Hamsterball Gold Game for PC. Hamsterball Gold is a size 6 platform game by Raptisoft. An unregistered version of this cute shareware ... Hamsterball Gold Game. -This feature is not available right now. -Please try again later. -Hamsterball Gold is a size 6 platform game from Raptisoft. An unregistered version of this cute shareware program ... -Jan 14, 2013 ... -Description: Hamsterball Gold is a size 6 platform game from Raptisoft. An unregistered version of this cute shareware program ... -Hamsterball Gold - free download without registration ... 8a78ff9644
        -
        -
        -

        diff --git a/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/renderer/gl/render.py b/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/renderer/gl/render.py deleted file mode 100644 index 57c219386c9bc0adb1ee78dd1c31a6fbf0dd1b3d..0000000000000000000000000000000000000000 --- a/spaces/rfrossard/Image-and-3D-Model-Creator/PIFu/lib/renderer/gl/render.py +++ /dev/null @@ -1,310 +0,0 @@ -from ctypes import * - -import numpy as np -from .framework import * - -GLUT = None - -# NOTE: Render class assumes GL context is created already. -class Render: - def __init__(self, width=1600, height=1200, name='GL Renderer', - program_files=['simple.fs', 'simple.vs'], color_size=1, ms_rate=1, egl=False): - self.width = width - self.height = height - self.name = name - self.use_inverse_depth = False - self.egl = egl - - glEnable(GL_DEPTH_TEST) - - glClampColor(GL_CLAMP_READ_COLOR, GL_FALSE) - glClampColor(GL_CLAMP_FRAGMENT_COLOR, GL_FALSE) - glClampColor(GL_CLAMP_VERTEX_COLOR, GL_FALSE) - - # init program - shader_list = [] - - for program_file in program_files: - _, ext = os.path.splitext(program_file) - if ext == '.vs': - shader_list.append(loadShader(GL_VERTEX_SHADER, program_file)) - elif ext == '.fs': - shader_list.append(loadShader(GL_FRAGMENT_SHADER, program_file)) - elif ext == '.gs': - shader_list.append(loadShader(GL_GEOMETRY_SHADER, program_file)) - - self.program = createProgram(shader_list) - - for shader in shader_list: - glDeleteShader(shader) - - # Init uniform variables - self.model_mat_unif = glGetUniformLocation(self.program, 'ModelMat') - self.persp_mat_unif = glGetUniformLocation(self.program, 'PerspMat') - - self.vertex_buffer = glGenBuffers(1) - - # Init screen quad program and buffer - self.quad_program, self.quad_buffer = self.init_quad_program() - - # Configure frame buffer - self.frame_buffer = glGenFramebuffers(1) - glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer) - - self.intermediate_fbo = None - if ms_rate > 1: - # Configure texture buffer to render to - self.color_buffer = [] - for i in range(color_size): - color_buffer = glGenTextures(1) - multi_sample_rate = ms_rate - glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, color_buffer) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR) - glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, multi_sample_rate, GL_RGBA32F, self.width, self.height, GL_TRUE) - glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, 0) - glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D_MULTISAMPLE, color_buffer, 0) - self.color_buffer.append(color_buffer) - - self.render_buffer = glGenRenderbuffers(1) - glBindRenderbuffer(GL_RENDERBUFFER, self.render_buffer) - glRenderbufferStorageMultisample(GL_RENDERBUFFER, multi_sample_rate, GL_DEPTH24_STENCIL8, self.width, self.height) - glBindRenderbuffer(GL_RENDERBUFFER, 0) - glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, self.render_buffer) - - attachments = [] - for i in range(color_size): - attachments.append(GL_COLOR_ATTACHMENT0 + i) - glDrawBuffers(color_size, attachments) - glBindFramebuffer(GL_FRAMEBUFFER, 0) - - self.intermediate_fbo = glGenFramebuffers(1) - glBindFramebuffer(GL_FRAMEBUFFER, self.intermediate_fbo) - - self.screen_texture = [] - for i in range(color_size): - screen_texture = glGenTextures(1) - 
glBindTexture(GL_TEXTURE_2D, screen_texture) - glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, self.width, self.height, 0, GL_RGBA, GL_FLOAT, None) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) - glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D, screen_texture, 0) - self.screen_texture.append(screen_texture) - - glDrawBuffers(color_size, attachments) - glBindFramebuffer(GL_FRAMEBUFFER, 0) - else: - self.color_buffer = [] - for i in range(color_size): - color_buffer = glGenTextures(1) - glBindTexture(GL_TEXTURE_2D, color_buffer) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST) - glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, self.width, self.height, 0, GL_RGBA, GL_FLOAT, None) - glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D, color_buffer, 0) - self.color_buffer.append(color_buffer) - - # Configure depth texture map to render to - self.depth_buffer = glGenTextures(1) - glBindTexture(GL_TEXTURE_2D, self.depth_buffer) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST) - glTexParameteri(GL_TEXTURE_2D, GL_DEPTH_TEXTURE_MODE, GL_INTENSITY) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE) - glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL) - glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, self.width, self.height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, None) - glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, self.depth_buffer, 0) - - attachments = [] - for i in range(color_size): - attachments.append(GL_COLOR_ATTACHMENT0 + i) - glDrawBuffers(color_size, attachments) - self.screen_texture = self.color_buffer - - glBindFramebuffer(GL_FRAMEBUFFER, 0) - - - # Configure texture buffer if needed - self.render_texture = None - - # NOTE: original render_texture only support one input - # this is tentative member of this issue - self.render_texture_v2 = {} - - # Inner storage for buffer data - self.vertex_data = None - self.vertex_dim = None - self.n_vertices = None - - self.model_view_matrix = None - self.projection_matrix = None - - if not egl: - global GLUT - import OpenGL.GLUT as GLUT - GLUT.glutDisplayFunc(self.display) - - - def init_quad_program(self): - shader_list = [] - - shader_list.append(loadShader(GL_VERTEX_SHADER, "quad.vs")) - shader_list.append(loadShader(GL_FRAGMENT_SHADER, "quad.fs")) - - the_program = createProgram(shader_list) - - for shader in shader_list: - glDeleteShader(shader) - - # vertex attributes for a quad that fills the entire screen in Normalized Device Coordinates. 
- # positions # texCoords - quad_vertices = np.array( - [-1.0, 1.0, 0.0, 1.0, - -1.0, -1.0, 0.0, 0.0, - 1.0, -1.0, 1.0, 0.0, - - -1.0, 1.0, 0.0, 1.0, - 1.0, -1.0, 1.0, 0.0, - 1.0, 1.0, 1.0, 1.0] - ) - - quad_buffer = glGenBuffers(1) - glBindBuffer(GL_ARRAY_BUFFER, quad_buffer) - glBufferData(GL_ARRAY_BUFFER, quad_vertices, GL_STATIC_DRAW) - - glBindBuffer(GL_ARRAY_BUFFER, 0) - - return the_program, quad_buffer - - def set_mesh(self, vertices, faces): - self.vertex_data = vertices[faces.reshape([-1])] - self.vertex_dim = self.vertex_data.shape[1] - self.n_vertices = self.vertex_data.shape[0] - - glBindBuffer(GL_ARRAY_BUFFER, self.vertex_buffer) - glBufferData(GL_ARRAY_BUFFER, self.vertex_data, GL_STATIC_DRAW) - - glBindBuffer(GL_ARRAY_BUFFER, 0) - - def set_viewpoint(self, projection, model_view): - self.projection_matrix = projection - self.model_view_matrix = model_view - - def draw_init(self): - glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer) - glEnable(GL_DEPTH_TEST) - - glClearColor(0.0, 0.0, 0.0, 0.0) - if self.use_inverse_depth: - glDepthFunc(GL_GREATER) - glClearDepth(0.0) - else: - glDepthFunc(GL_LESS) - glClearDepth(1.0) - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) - - def draw_end(self): - if self.intermediate_fbo is not None: - for i in range(len(self.color_buffer)): - glBindFramebuffer(GL_READ_FRAMEBUFFER, self.frame_buffer) - glReadBuffer(GL_COLOR_ATTACHMENT0 + i) - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self.intermediate_fbo) - glDrawBuffer(GL_COLOR_ATTACHMENT0 + i) - glBlitFramebuffer(0, 0, self.width, self.height, 0, 0, self.width, self.height, GL_COLOR_BUFFER_BIT, GL_NEAREST) - - glBindFramebuffer(GL_FRAMEBUFFER, 0) - glDepthFunc(GL_LESS) - glClearDepth(1.0) - - def draw(self): - self.draw_init() - - glUseProgram(self.program) - glUniformMatrix4fv(self.model_mat_unif, 1, GL_FALSE, self.model_view_matrix.transpose()) - glUniformMatrix4fv(self.persp_mat_unif, 1, GL_FALSE, self.projection_matrix.transpose()) - - glBindBuffer(GL_ARRAY_BUFFER, self.vertex_buffer) - - glEnableVertexAttribArray(0) - glVertexAttribPointer(0, self.vertex_dim, GL_DOUBLE, GL_FALSE, 0, None) - - glDrawArrays(GL_TRIANGLES, 0, self.n_vertices) - - glDisableVertexAttribArray(0) - - glBindBuffer(GL_ARRAY_BUFFER, 0) - - glUseProgram(0) - - self.draw_end() - - def get_color(self, color_id=0): - glBindFramebuffer(GL_FRAMEBUFFER, self.intermediate_fbo if self.intermediate_fbo is not None else self.frame_buffer) - glReadBuffer(GL_COLOR_ATTACHMENT0 + color_id) - data = glReadPixels(0, 0, self.width, self.height, GL_RGBA, GL_FLOAT, outputType=None) - glBindFramebuffer(GL_FRAMEBUFFER, 0) - rgb = data.reshape(self.height, self.width, -1) - rgb = np.flip(rgb, 0) - return rgb - - def get_z_value(self): - glBindFramebuffer(GL_FRAMEBUFFER, self.frame_buffer) - data = glReadPixels(0, 0, self.width, self.height, GL_DEPTH_COMPONENT, GL_FLOAT, outputType=None) - glBindFramebuffer(GL_FRAMEBUFFER, 0) - z = data.reshape(self.height, self.width) - z = np.flip(z, 0) - return z - - def display(self): - self.draw() - - if not self.egl: - # First we draw a scene. - # Notice the result is stored in the texture buffer. - - # Then we return to the default frame buffer since we will display on the screen. - glBindFramebuffer(GL_FRAMEBUFFER, 0) - - # Do the clean-up. - glClearColor(0.0, 0.0, 0.0, 0.0) - glClear(GL_COLOR_BUFFER_BIT) - - # We draw a rectangle which covers the whole screen. 
- glUseProgram(self.quad_program) - glBindBuffer(GL_ARRAY_BUFFER, self.quad_buffer) - - size_of_double = 8 - glEnableVertexAttribArray(0) - glVertexAttribPointer(0, 2, GL_DOUBLE, GL_FALSE, 4 * size_of_double, None) - glEnableVertexAttribArray(1) - glVertexAttribPointer(1, 2, GL_DOUBLE, GL_FALSE, 4 * size_of_double, c_void_p(2 * size_of_double)) - - glDisable(GL_DEPTH_TEST) - - # The stored texture is then mapped to this rectangle. - # properly assing color buffer texture - glActiveTexture(GL_TEXTURE0) - glBindTexture(GL_TEXTURE_2D, self.screen_texture[0]) - glUniform1i(glGetUniformLocation(self.quad_program, 'screenTexture'), 0) - - glDrawArrays(GL_TRIANGLES, 0, 6) - - glDisableVertexAttribArray(1) - glDisableVertexAttribArray(0) - - glEnable(GL_DEPTH_TEST) - glBindBuffer(GL_ARRAY_BUFFER, 0) - glUseProgram(0) - - GLUT.glutSwapBuffers() - GLUT.glutPostRedisplay() - - def show(self): - if not self.egl: - GLUT.glutMainLoop() diff --git a/spaces/rinme/vits-models/text/__init__.py b/spaces/rinme/vits-models/text/__init__.py deleted file mode 100644 index 663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000 --- a/spaces/rinme/vits-models/text/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence, clean_text - - -def cleaned_text_to_sequence(cleaned_text): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
- Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/robyramos/analise_perfil_v2/README.md b/spaces/robyramos/analise_perfil_v2/README.md deleted file mode 100644 index 16b3ecb4af1384bc12d276f94a0ad7cbe9375a02..0000000000000000000000000000000000000000 --- a/spaces/robyramos/analise_perfil_v2/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Analise Perfil -emoji: 💻 -colorFrom: purple -colorTo: red -sdk: gradio -sdk_version: 3.32.0 -app_file: app.py -pinned: false -license: other -duplicated_from: robyramos/analise_perfil ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/rorallitri/biomedical-language-models/logs/Download Anti Deep Freeze Versi 7.30.020.3852 The Ultimate Guide to Unfreeze Your PC.md b/spaces/rorallitri/biomedical-language-models/logs/Download Anti Deep Freeze Versi 7.30.020.3852 The Ultimate Guide to Unfreeze Your PC.md deleted file mode 100644 index 7ff6ad191ac9d52494c08c10f44c83e61c040d77..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Download Anti Deep Freeze Versi 7.30.020.3852 The Ultimate Guide to Unfreeze Your PC.md +++ /dev/null @@ -1,9 +0,0 @@ - -

        anti deep freeze 7 anti deep freeze 8 anti deep freeze 7.51 anti deep freeze download anti deep freeze 7.22 anti deep freeze 0.4 anti deep freeze 7.21 anti deep freeze 6 anti deep freeze v8 anti deep freeze 2017 anti deep freeze 8 download anti deep freeze all version anti deep freeze kuyhaa anti deep freeze 7 rarDeep Freeze Standard 8.37.020.4674 indir - Deep Freeze Standart, zellikle internet kafeler iin ok faydal bir program. Deep Freeze program ile bilgisayarlarnz sanki .. Versin 7.30 Versin 7.61 . anti-deep-freezer . Hello how are you after many hours of programming at last bring them Anti Deep Freeze 2011. enjoy it .. Deep Freeze v8.30.020.4627 v8.30.020.4627 MacEasyRecovery v11.2.1 .. MidwayUSA is a privately held American retailer of various hunting and outdoor-related products.. Updated - January 8, 2017 - Faronics has released a patch utility that can be used to apply this registry key using the remote launch functions of both Faronics Core .. 7/10 (311 votes) - Download Deep Freeze Free. Deep Freeze is a tool to freeze your computer so that it isn't affected by the changes that you apply. Download Deep .. Anti Deep Freeze 7.30.020 > &nbs. Download Deep Freeze Standard 2017 for Windows. Deep Freeze offers a great deal cost efficiency for corporate, government, and training facilities for computer .. Anti Deep Freeze 7.30.020,free Anti Deep Freeze 7.30.020 download. #7 23 ub 2015 #7 faronics diye bir klasr yok kardeim I insanca yecik 29 Kas 2015 #8 . Deep Freeze 8.30'u nasl kaldrrz? Driver sormad gibi alp kapandktan .. deep freeze() .. Undeepfreeze 7.00.020.3172 Araypta Bulamayanlar iin koyuyorum.Bu program sayesinde internet kafelerdeki deep freezi krabilir ve istediiniz keyloggeri .. Deep Freeze Standard 8.38.020.4676 Final Full Version By Admin GigaPurbalingga October 23, 2017 149 Comments Deep Freeze Full Anda bosan dengan virus yang .. 8.37.020.4627.zip suone 2017-12-7 14:01:48 30.06M .. Download 30-Day Free Trial on Faronics On-Premise & Cloud Solution to protect, manage and optimize your IT assets.. la version de mi deep freeze es la 7.30.020 3852 y si conocen otro . pero cuando utilizo el anti deep freeze me aparece "operatin failed" y ya .. Faronics Deep Freeze and computer management software are perfect for educational institutions and corporations of all sizes. Power Save, Insight, Deep Freeze are .. Anti Deep Freez Standard Anti Deep Freez Standard Deep Freez Deep Freez 7 652 KB DZ-Hacker. Cmo quitar Deep Freeze 7.30 [Resuelto/Cerrado] godhwar15 2 Publicaciones mircoles, 30 de enero . Desinstalar Deep Freeze 7.30.020.3852 sin saber la contrasea. How do I uninstall Deep Freeze? Posted by Adam Zilliax, Last modified by Adam Zilliax on 10 February 2011 12:16 PM. Disable Deep Freeze before uninstalling it.. downloadantideepfreezeversi7.30.020.3852downloader.exe - Is This File Safe? . Use Emsisoft Anti-Malware to thoroughly scan your PC and clean this infection .. Deep Freeze Enterprise 8.32.220.5109 Final, download Deep Freeze Enterprise full version, . => Deep Freeze Standard 8.20.020.4589 Final Via Datafilehost Password : .. Pour eviter des faux positifs de la part de certains anti-virus le serial est present dans . Tlcharger Deep Freeze Standard 8.30.220.4627 .. Faronics Anti-Virus 4.13.2100.390 20171215 30 . Deep Freeze Deep Freeze Deep Freeze Mac Deep Freeze .. Malwarebytes Anti-Exploit Premium 1.12.1.42, . En esta versin Deep Freeze v8.38.020.4676 se han corregido varios errores. . marzo 7, 2016 0. Windows 7 USB .. 
Faronics Deep Freeze Standard v7.30.020.3852 . Vista and Windows 7, Deep Freeze for Windows supports multiple hard-drive . Faronics Anti-Executable v3.0.1111 .. DeepFreeze 8.38.020.4676 20171020 30 . Deep freeze .. DeepFreeze7.22.020.3453 Anti . Deep Freeze .. pc6(Deep Freeze),DeepFreeze .. Faronics Deep Freeze . Schedule Thawed Maintenance periods to perform Windows updates through the Internet or a SUS/WSUS server or Anti . (IDM) 6.30 Build 7 .. -InterMapperDeep Freeze . Faronics Deep Freeze Faronics Anti .. 7/10 (311 votos) - Descargar Deep Freeze Gratis. Deep Freeze es una herramienta para congelar tu ordenador y que no le afecten los cambios que realices. Descarga Deep . 99473d6f7e

        -

        Wavelore Pedal Steel Guitar KONTAKT DVDR [url= ]Download[/url]download waves mercury 50 full crack [url= ]coreldraw x6 portable 101[/url] Microsoft Office Professional Plus 2019 Preview Build 10301 LPs Serial Key [url= -Sdn9]Download[/url] download film barbie diamond castle subtitle indonesia [url= ]Download[/url] free download software organ tunggal di pc [url= ] [/url] Smuppogevoimuro [url= -dP1nkd-F_11393Otv]Download[/url] FS2004 FSX LatinVFR Santiago SCEL RIP Scenery [url= _gn_9b3fBLGvN2que]wakelet.com[/url] Famithebooxessonfome [url= ]wakelet[/url]toshiba e studio 306 drivers free download 7 [url= ]wakelet.com[/url] LusegreeBum [url= _0kTUyo_bKkuIl2JJ]wakelet.com[/url]

        -

        Download Anti Deep Freeze Versi 7.30.020.3852


        Downloadhttps://tinurll.com/2uzmUN



        -

        ayyappa songs lyrics in tamil pdf 97 [url= ] [/url]MegoUnorgejeava [url= -Y2_8jcXai0Y38_z]Download[/url] Speashspepejoina [url= _Cms7x0OveK8]wakelet[/url] FetOffesse [url= ]Lehrbuch Der Molekularen Zellbiologie Alberts Pdf Download[/url] edilefalkide [url= ]wakelet.com[/url] Survival Run with Bear Grylls Full Apk Hile indir [url= -mL7PE_] -mL7PE_[/url] download anti deep freeze versi 7.30.020.3852 169 [url= -SZqK4kj0sPAOpGKrh]wakelet[/url] Senha extrair UFC Undisputed 3 PC hit [url= ]wakelet[/url]spismMizCoimiphowl [url= ]Download[/url] LusegreeBum [url= ]wakelet[/url]

        -

        MutGlonsDunseno [url= -mi-nelum-98-front]Download[/url]nahjul balagha sindhi pdf download [url= -download-resident-evil-4-pc-rip]trello[/url] Speashspepejoina [url= -superior-girl-1984zip]Download[/url] partitiongratuitepianolafouleedithPiaf [url= -sanam-re-movie-download-720p-hd]Download[/url] edilefalkide [url= -joe-joe-thomas-new-man-full-album-zip]trello[/url] Smuppogevoimuro [url= -yuuyami-doori-tankentai-iso-full-version-download] -yuuyami-doori-tankentai-iso-full-version-download[/url] reedlyfeally [url= -anycount-v70-build-707worldendh33t] -anycount-v70-build-707worldendh33t[/url] Microsoft Office Pro Plus 2016 Activator [url= -global-mapper-18-free-download]Download[/url]Adobe Acrobat Pro DC 2018.011.20063 Activator [CracksMind] 64 bit [url= -plaxis2d2015crackspread]Download[/url] LusegreeBum [url= -a-short-history-of-islam-by-mazhar-ul-haq-pdf-free-36]a short history of islam by mazhar ul haq pdf free 36[/url]

        -

        MutGlonsDunseno [url= -luxor-2-hd-2013-eng-lucky-patcher] -luxor-2-hd-2013-eng-lucky-patcher[/url]Tratat De Medicina Legala Vladimir Belis Pdf 24 [url= -helabasa-2008-crack-keygen-14]helabasa 2008 crack keygen 14[/url] Speashspepejoina [url= -king-kong-movie-in-hindi-mp4-download]trello[/url] Skolnik Introduction To Radar Solution Manual 113 [url= -convert-java-to-vxp]Download[/url] kamasastrytelugukathalupdf [url= -rendering-thread-exception-fatal-error-batman-arkham-city]trello.com[/url] Smuppogevoimuro [url= -management-of-healthcare-organizationspdf]Management Of Healthcare Organizations.pdf[/url] ssshhhkoihaiserialdownload [url= -pinnacle-emptyv-51013170-14a-driver24]Download[/url] Tiger Woods Pga 12 Serial Key [url= -guiamagicapracticafernandezeditores]trello.com[/url]spismMizCoimiphowl [url= -nch-mixpad-masters-edition-5-crack-with-serial-key-full-version]Download[/url] Skalp For Sketchup 2016 Crack [url= -photomatix-pro-4-2-keygen-20]trello[/url]

        aaccfb2cb3
        -
        -
        \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/German Teens In Pantyhose [PORTABLE].md b/spaces/rorallitri/biomedical-language-models/logs/German Teens In Pantyhose [PORTABLE].md deleted file mode 100644 index 983dd553484905c994482d943e960281c071fa0e..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/German Teens In Pantyhose [PORTABLE].md +++ /dev/null @@ -1,11 +0,0 @@ - -

        i am searching a german teen movie from late 90's or early 2000's, where a white and a black girl making the driving licence.
        Movie starts where the girls drinking a coke and to pay it, the white girl goes with the owner in a room and he looks her masturbating.
        Hope anybody can help me, thanks. reply favorite add to gallery permalink Share Copy Quote Strike Insert Image url Insert Insert link url Insert Post as Anonymous Attachments are disabled for system maintenance.

        -

        Find video? I watched a video a while back. It was a 2 men for 1 teen. I remember the ending because it was funny. She didnt realize that the scene would be on the internet, which means her family would see it. I believe they may have spoken german or another foreign language.

        -

        German Teens In Pantyhose


        Download - https://tinurll.com/2uzmki



        -

        Anybody remember the clip that was on hear a while ago taken from a french/german film (looked 80's ish) where a youngish boy/teen was in bed naked with an older naked woman and she is stroking his cock. She then tells him to touch her like she had showed him before. Anybody got a link or know the movie?

        -

        Im looking for a video that i havent seen in quite some time but is one of my favorites, it used to show up as a thumbnail for some of the "friends" sites on here and it was a blonde german teen laying on her stomach looking at the camera getting fucked in the ass by 2 guys with cum dripping out of her ass. There was a yellow tint to the video. thanks if anyone can help reply favorite add to gallery permalink Share Copy Quote Strike Insert Image url Insert Insert link url Insert Post as Anonymous Attachments are disabled for system maintenance.

        -

        -

        does anyone know any german teen erotic stories (boards)! reply favorite add to gallery permalink Share Copy Quote Strike Insert Image url Insert Insert link url Insert Post as Anonymous Attachments are disabled for system maintenance.

        -

        I have been looking for years for this one german teen bate clip. it was once briefly hosted under the title "kleine natalie erste erfahrungen" on another site but the user closed their account. Cute little brunette with a huge dildo. Saw it back in my kazza days I think. Can anyone hook me up? reply favorite add to gallery permalink Share Copy Quote Strike Insert Image url Insert Insert link url Insert Post as Anonymous Attachments are disabled for system maintenance.

        aaccfb2cb3
        -
        -
        \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/How to Get Loki Download No Survey No Password in Minutes.md b/spaces/rorallitri/biomedical-language-models/logs/How to Get Loki Download No Survey No Password in Minutes.md deleted file mode 100644 index 869effcd05b84909317337d474be6285f7270bac..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/How to Get Loki Download No Survey No Password in Minutes.md +++ /dev/null @@ -1,6 +0,0 @@ - -

To submit data, visit the online survey and enter the password you will receive by email; there you can enter your results for the season. If you prefer, you can print out the Data Collection Sheet Example and mail it to us at the address below:

        -

        Loki Download No Survey No Password


        Download File ►►►►► https://tinurll.com/2uznGZ



        -

Once a device is compromised, the malware uses a keylogger to steal passwords and credentials stored in victims' web browsers as well as on the device itself, according to CISA. In addition to data exfiltration, LokiBot acts as a backdoor to download other malicious payloads.

        -
        -
        \ No newline at end of file diff --git a/spaces/roshithindia/image_classification/app.py b/spaces/roshithindia/image_classification/app.py deleted file mode 100644 index 7fad045985a3a12ddc3b1c5d71b9b14ac0fa4fed..0000000000000000000000000000000000000000 --- a/spaces/roshithindia/image_classification/app.py +++ /dev/null @@ -1,16 +0,0 @@ -import streamlit as st -from transformers import ViTImageProcessor, ViTForImageClassification -from PIL import Image as img - -x = st.file_uploader("Upload Images", type=["png","jpg","jpeg"]) -if x is not None: - st.image(img.open(x),width=255) - i = img.open(x) - processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') - model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224') - inputs = processor(images=i, return_tensors="pt") - outputs = model(**inputs) - logits = outputs.logits - predicted_class_idx = logits.argmax(-1).item() - st.text("Our Model Predicts : ") - st.write(model.config.id2label[predicted_class_idx]) \ No newline at end of file diff --git a/spaces/rossellison/kpop-face-generator/stylegan3-fun/viz/latent_widget.py b/spaces/rossellison/kpop-face-generator/stylegan3-fun/viz/latent_widget.py deleted file mode 100644 index 32c743bdbcac8a12425f8e5b32b9ea2d4612365d..0000000000000000000000000000000000000000 --- a/spaces/rossellison/kpop-face-generator/stylegan3-fun/viz/latent_widget.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
- -import numpy as np -import imgui -import dnnlib -from gui_utils import imgui_utils - -#---------------------------------------------------------------------------- - -class LatentWidget: - def __init__(self, viz): - self.viz = viz - self.latent = dnnlib.EasyDict(x=0, y=0, anim=False, speed=0.25) - self.latent_def = dnnlib.EasyDict(self.latent) - self.step_y = 100 - - def drag(self, dx, dy): - viz = self.viz - self.latent.x += dx / viz.font_size * 4e-2 - self.latent.y += dy / viz.font_size * 4e-2 - - @imgui_utils.scoped_by_object_id - def __call__(self, show=True): - viz = self.viz - if show: - imgui.text('Latent') - imgui.same_line(viz.label_w) - seed = round(self.latent.x) + round(self.latent.y) * self.step_y - with imgui_utils.item_width(viz.font_size * 8): - changed, seed = imgui.input_int('##seed', seed) - if changed: - self.latent.x = seed - self.latent.y = 0 - imgui.same_line(viz.label_w + viz.font_size * 8 + viz.spacing) - frac_x = self.latent.x - round(self.latent.x) - frac_y = self.latent.y - round(self.latent.y) - with imgui_utils.item_width(viz.font_size * 5): - changed, (new_frac_x, new_frac_y) = imgui.input_float2('##frac', frac_x, frac_y, format='%+.2f', flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE) - if changed: - self.latent.x += new_frac_x - frac_x - self.latent.y += new_frac_y - frac_y - imgui.same_line(viz.label_w + viz.font_size * 13 + viz.spacing * 2) - _clicked, dragging, dx, dy = imgui_utils.drag_button('Drag', width=viz.button_w) - if dragging: - self.drag(dx, dy) - imgui.same_line(viz.label_w + viz.font_size * 13 + viz.button_w + viz.spacing * 3) - _clicked, self.latent.anim = imgui.checkbox('Anim', self.latent.anim) - imgui.same_line(round(viz.font_size * 27.7)) - with imgui_utils.item_width(-1 - viz.button_w * 2 - viz.spacing * 2), imgui_utils.grayed_out(not self.latent.anim): - changed, speed = imgui.slider_float('##speed', self.latent.speed, -5, 5, format='Speed %.3f', power=3) - if changed: - self.latent.speed = speed - imgui.same_line() - snapped = dnnlib.EasyDict(self.latent, x=round(self.latent.x), y=round(self.latent.y)) - if imgui_utils.button('Snap', width=viz.button_w, enabled=(self.latent != snapped)): - self.latent = snapped - imgui.same_line() - if imgui_utils.button('Reset', width=-1, enabled=(self.latent != self.latent_def)): - self.latent = dnnlib.EasyDict(self.latent_def) - - if self.latent.anim: - self.latent.x += viz.frame_delta * self.latent.speed - viz.args.w0_seeds = [] # [[seed, weight], ...] - for ofs_x, ofs_y in [[0, 0], [1, 0], [0, 1], [1, 1]]: - seed_x = np.floor(self.latent.x) + ofs_x - seed_y = np.floor(self.latent.y) + ofs_y - seed = (int(seed_x) + int(seed_y) * self.step_y) & ((1 << 32) - 1) - weight = (1 - abs(self.latent.x - seed_x)) * (1 - abs(self.latent.y - seed_y)) - if weight > 0: - viz.args.w0_seeds.append([seed, weight]) - -#---------------------------------------------------------------------------- diff --git a/spaces/rstallman/Mayfair-Partner-Music/CONTRIBUTING.md b/spaces/rstallman/Mayfair-Partner-Music/CONTRIBUTING.md deleted file mode 100644 index 55b99140204d785d572ada9761dd77f302ae31c6..0000000000000000000000000000000000000000 --- a/spaces/rstallman/Mayfair-Partner-Music/CONTRIBUTING.md +++ /dev/null @@ -1,35 +0,0 @@ -# Contributing to Audiocraft - -We want to make contributing to this project as easy and transparent as -possible. - -## Pull Requests - -Audiocraft is the implementation of a research paper. -Therefore, we do not plan on accepting many pull requests for new features. 
-We certainly welcome them for bug fixes. - -1. Fork the repo and create your branch from `main`. -2. If you've added code that should be tested, add tests. -3. If you've changed APIs, update the documentation. -4. Ensure the test suite passes. -5. Make sure your code lints. -6. If you haven't already, complete the Contributor License Agreement ("CLA"). - -## Contributor License Agreement ("CLA") -In order to accept your pull request, we need you to submit a CLA. You only need -to do this once to work on any of Meta's open source projects. - -Complete your CLA here: - -## Issues -We use GitHub issues to track public bugs. Please ensure your description is -clear and has sufficient instructions to be able to reproduce the issue. - -Meta has a [bounty program](https://www.facebook.com/whitehat/) for the safe -disclosure of security bugs. In those cases, please go through the process -outlined on that page and do not file a public issue. - -## License -By contributing to encodec, you agree that your contributions will be licensed -under the LICENSE file in the root directory of this source tree. diff --git a/spaces/sam-hq-team/sam-hq/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h b/spaces/sam-hq-team/sam-hq/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h deleted file mode 100644 index b2b88e8c46f19b6db0933163e57ccdb51180f517..0000000000000000000000000000000000000000 --- a/spaces/sam-hq-team/sam-hq/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h +++ /dev/null @@ -1,35 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. 
-* Licensed under the Apache License, Version 2.0 [see LICENSE for details] -************************************************************************************************** -* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 -************************************************************************************************** -*/ - -#pragma once -#include - -namespace groundingdino { - -at::Tensor -ms_deform_attn_cpu_forward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const int im2col_step); - -std::vector -ms_deform_attn_cpu_backward( - const at::Tensor &value, - const at::Tensor &spatial_shapes, - const at::Tensor &level_start_index, - const at::Tensor &sampling_loc, - const at::Tensor &attn_weight, - const at::Tensor &grad_output, - const int im2col_step); - -} // namespace groundingdino diff --git a/spaces/sana123/Sinhala_Audio-to-Text/app.py b/spaces/sana123/Sinhala_Audio-to-Text/app.py deleted file mode 100644 index 69eea6bfc3c86c92d81d1df92ca66a276e0601a4..0000000000000000000000000000000000000000 --- a/spaces/sana123/Sinhala_Audio-to-Text/app.py +++ /dev/null @@ -1,109 +0,0 @@ -import torch - -import gradio as gr -import pytube as pt -from transformers import pipeline - -MODEL_NAME = "Subhaka/whisper-small-Sinhala-Fine_Tune" - -device = 0 if torch.cuda.is_available() else "cpu" - -pipe = pipeline( - task="automatic-speech-recognition", - model=MODEL_NAME, - chunk_length_s=30, - device=device, -) - - -all_special_ids = pipe.tokenizer.all_special_ids -transcribe_token_id = all_special_ids[-5] -translate_token_id = all_special_ids[-6] - - -def transcribe(microphone, file_upload, task): - warn_output = "" - if (microphone is not None) and (file_upload is not None): - warn_output = ( - "WARNING: You've uploaded an audio file and used the microphone. " - "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n" - ) - - elif (microphone is None) and (file_upload is None): - return "ERROR: You have to either use the microphone or upload an audio file" - - file = microphone if microphone is not None else file_upload - - pipe.model.config.forced_decoder_ids = [[2, transcribe_token_id if task=="transcribe" else translate_token_id]] - - text = pipe(file)["text"] - - return warn_output + text - - -def _return_yt_html_embed(yt_url): - video_id = yt_url.split("?v=")[-1] - HTML_str = ( - f'
        ' - "
        " - ) - return HTML_str - - -def yt_transcribe(yt_url, task): - yt = pt.YouTube(yt_url) - html_embed_str = _return_yt_html_embed(yt_url) - stream = yt.streams.filter(only_audio=True)[0] - stream.download(filename="audio.mp3") - - pipe.model.config.forced_decoder_ids = [[2, transcribe_token_id if task=="transcribe" else translate_token_id]] - - text = pipe("audio.mp3")["text"] - - return html_embed_str, text - - -demo = gr.Blocks() - -mf_transcribe = gr.Interface( - fn=transcribe, - inputs=[ - gr.inputs.Audio(source="microphone", type="filepath", optional=True), - gr.inputs.Audio(source="upload", type="filepath", optional=True), - gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"), - ], - outputs="text", - layout="horizontal", - theme="huggingface", - title="Audio-to-Text Playground: Transcribe Audio", - description=( - "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the" - f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files" - " of arbitrary length." - ), - allow_flagging="never", -) - -yt_transcribe = gr.Interface( - fn=yt_transcribe, - inputs=[ - gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"), - gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe") - ], - outputs=["html", "text"], - layout="horizontal", - theme="huggingface", - title="Audio-to-Text Playground: Transcribe YouTube", - description=( - "Transcribe long-form YouTube videos with the click of a button! Demo uses the checkpoint" - f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe video files of" - " arbitrary length." - ), - allow_flagging="never", -) - -with demo: - gr.TabbedInterface([mf_transcribe, yt_transcribe], ["Transcribe Audio", "Transcribe YouTube"]) - -demo.launch(enable_queue=True) - diff --git a/spaces/sanchanhart/Warehouse_Apparel_Detection/app.py b/spaces/sanchanhart/Warehouse_Apparel_Detection/app.py deleted file mode 100644 index 77e9e7e0b0a9d78c6df5014658d050e6de7a0a3d..0000000000000000000000000000000000000000 --- a/spaces/sanchanhart/Warehouse_Apparel_Detection/app.py +++ /dev/null @@ -1,47 +0,0 @@ -from charset_normalizer import detect -import numpy as np -import gradio as gr -import torch -import torch.nn as nn -import cv2 -import os -from numpy import random -from metadata.utils.utils import decodeImage -from metadata.predictor_yolo_detector.detector_test import Detector -from PIL import Image - -class ClientApp: - def __init__(self): - self.filename = "inputImage.jpg" - #modelPath = 'research/ssd_mobilenet_v1_coco_2017_11_17' - self.objectDetection = Detector(self.filename) - - - - -clApp = ClientApp() - -def predict_image(input_img): - - img = Image.fromarray(input_img) - img.save("./metadata/predictor_yolo_detector/inference/images/"+ clApp.filename) - resultant_img = clApp.objectDetection.detect_action() - - - return resultant_img - -demo = gr.Blocks() - -with demo: - gr.Markdown( - """ -

        Warehouse Apparel Detection

        - """) - - detect = gr.Interface(predict_image, 'image', 'image', examples=[ - os.path.join(os.path.dirname(__file__), "images/image_1.jpg"), - os.path.join(os.path.dirname(__file__), "images/image_2.jpg"), - os.path.join(os.path.dirname(__file__), "images/image_3.jpg") - ]) - -demo.launch() \ No newline at end of file diff --git a/spaces/sanjayw/starchat-playground/app.py b/spaces/sanjayw/starchat-playground/app.py deleted file mode 100644 index 7e5f437c674e21e116c1bb62e18935322e4270e0..0000000000000000000000000000000000000000 --- a/spaces/sanjayw/starchat-playground/app.py +++ /dev/null @@ -1,347 +0,0 @@ -import datetime -import json -import os -import shutil - -import gradio as gr -from huggingface_hub import Repository -from text_generation import Client - -from dialogues import DialogueTemplate -from share_btn import (community_icon_html, loading_icon_html, share_btn_css, - share_js) - -HF_TOKEN = os.environ.get("HF_TOKEN", None) -API_TOKEN = os.environ.get("API_TOKEN", None) -API_URL = os.environ.get("API_URL", None) - -client = Client( - API_URL, - headers={"Authorization": f"Bearer {API_TOKEN}"}, -) - -repo = None -if HF_TOKEN: - try: - shutil.rmtree("./data/") - except: - pass - - repo = Repository( - local_dir="./data/", clone_from="HuggingFaceH4/starchat-prompts", use_auth_token=HF_TOKEN, repo_type="dataset" - ) - repo.git_pull() - - -def save_inputs_and_outputs(now, inputs, outputs, generate_kwargs): - current_hour = now.strftime("%Y-%m-%d_%H") - file_name = f"prompts_{current_hour}.jsonl" - - if repo is not None: - repo.git_pull(rebase=True) - with open(os.path.join("data", file_name), "a") as f: - json.dump({"inputs": inputs, "outputs": outputs, "generate_kwargs": generate_kwargs}, f, ensure_ascii=False) - f.write("\n") - repo.push_to_hub() - - -def get_total_inputs(inputs, chatbot, preprompt, user_name, assistant_name, sep): - past = [] - for data in chatbot: - user_data, model_data = data - - if not user_data.startswith(user_name): - user_data = user_name + user_data - if not model_data.startswith(sep + assistant_name): - model_data = sep + assistant_name + model_data - - past.append(user_data + model_data.rstrip() + sep) - - if not inputs.startswith(user_name): - inputs = user_name + inputs - - total_inputs = preprompt + "".join(past) + inputs + sep + assistant_name.rstrip() - - return total_inputs - - -def has_no_history(chatbot, history): - return not chatbot and not history - - -def generate( - system_message, - user_message, - chatbot, - history, - temperature, - top_k, - top_p, - max_new_tokens, - repetition_penalty, - do_save=True, -): - # Don't return meaningless message when the input is empty - if not user_message: - print("Empty input") - - history.append(user_message) - - past_messages = [] - for data in chatbot: - user_data, model_data = data - - past_messages.extend( - [{"role": "user", "content": user_data}, {"role": "assistant", "content": model_data.rstrip()}] - ) - - if len(past_messages) < 1: - dialogue_template = DialogueTemplate( - system=system_message, messages=[{"role": "user", "content": user_message}] - ) - prompt = dialogue_template.get_inference_prompt() - else: - dialogue_template = DialogueTemplate( - system=system_message, messages=past_messages + [{"role": "user", "content": user_message}] - ) - prompt = dialogue_template.get_inference_prompt() - - generate_kwargs = { - "temperature": temperature, - "top_k": top_k, - "top_p": top_p, - "max_new_tokens": max_new_tokens, - } - - temperature = float(temperature) - if temperature < 1e-2: - 
temperature = 1e-2 - top_p = float(top_p) - - generate_kwargs = dict( - temperature=temperature, - max_new_tokens=max_new_tokens, - top_p=top_p, - repetition_penalty=repetition_penalty, - do_sample=True, - truncate=999, - seed=42, - stop_sequences=["<|end|>"], - ) - - stream = client.generate_stream( - prompt, - **generate_kwargs, - ) - - output = "" - for idx, response in enumerate(stream): - if response.token.special: - continue - output += response.token.text - if idx == 0: - history.append(" " + output) - else: - history[-1] = output - - chat = [(history[i].strip(), history[i + 1].strip()) for i in range(0, len(history) - 1, 2)] - - yield chat, history, user_message, "" - - if HF_TOKEN and do_save: - try: - now = datetime.datetime.now() - current_time = now.strftime("%Y-%m-%d %H:%M:%S") - print(f"[{current_time}] Pushing prompt and completion to the Hub") - save_inputs_and_outputs(now, prompt, output, generate_kwargs) - except Exception as e: - print(e) - - return chat, history, user_message, "" - - -examples = [ - "How can I write a Python function to generate the nth Fibonacci number?", - "How do I get the current date using shell commands? Explain how it works.", - "What's the meaning of life?", - "Write a function in Javascript to reverse words in a given string.", - "Give the following data {'Name':['Tom', 'Brad', 'Kyle', 'Jerry'], 'Age':[20, 21, 19, 18], 'Height' : [6.1, 5.9, 6.0, 6.1]}. Can you plot one graph with two subplots as columns. The first is a bar graph showing the height of each person. The second is a bargraph showing the age of each person? Draw the graph in seaborn talk mode.", - "Create a regex to extract dates from logs", - "How to decode JSON into a typescript object", - "Write a list into a jsonlines file and save locally", -] - - -def clear_chat(): - return [], [] - - -def process_example(args): - for [x, y] in generate(args): - pass - return [x, y] - - -title = """

        ⭐ StarChat Playground 💬

        """ -custom_css = """ -#banner-image { - display: block; - margin-left: auto; - margin-right: auto; -} - -#chat-message { - font-size: 14px; - min-height: 300px; -} -""" - -with gr.Blocks(analytics_enabled=False, css=custom_css) as demo: - gr.HTML(title) - - with gr.Row(): - with gr.Column(): - gr.Image("thumbnail.png", elem_id="banner-image", show_label=False) - with gr.Column(): - gr.Markdown( - """ - 💻 This demo showcases an **alpha** version of **[StarChat](https://huggingface.co/HuggingFaceH4/starchat-alpha)**, a variant of **[StarCoderBase](https://huggingface.co/bigcode/starcoderbase)** that was fine-tuned on the [Dolly](https://huggingface.co/datasets/databricks/databricks-dolly-15k) and [OpenAssistant](https://huggingface.co/datasets/OpenAssistant/oasst1) datasets to act as a helpful coding assistant. The base model has 16B parameters and was pretrained on one trillion tokens sourced from 80+ programming languages, GitHub issues, Git commits, and Jupyter notebooks (all permissively licensed). - - 📝 For more details, check out our [blog post](https://huggingface.co/blog/starchat-alpha). - - ⚠️ **Intended Use**: this app and its [supporting model](https://huggingface.co/HuggingFaceH4/starchat-alpha) are provided as educational tools to explain large language model fine-tuning; not to serve as replacement for human expertise. - - ⚠️ **Known Failure Modes**: this alpha version of **StarChat** has not been aligned to human preferences with techniques like RLHF, so the model can produce problematic outputs (especially when prompted to do so). Since the base model was pretrained on a large corpus of code, it may produce code snippets that are syntactically valid but semantically incorrect. For example, it may produce code that does not compile or that produces incorrect results. It may also produce code that is vulnerable to security exploits. We have observed the model also has a tendency to produce false URLs which should be carefully inspected before clicking. For more details on the model's limitations in terms of factuality and biases, see the [model card](https://huggingface.co/HuggingFaceH4/starchat-alpha#bias-risks-and-limitations). - - ⚠️ **Data Collection**: by default, we are collecting the prompts entered in this app to further improve and evaluate the model. Do **NOT** share any personal or sensitive information while using the app! You can opt out of this data collection by removing the checkbox below. 
- """ - ) - - with gr.Row(): - do_save = gr.Checkbox( - value=True, - label="Store data", - info="You agree to the storage of your prompt and generated text for research and development purposes:", - ) - with gr.Accordion(label="System Prompt", open=False, elem_id="parameters-accordion"): - system_message = gr.Textbox( - elem_id="system-message", - placeholder="Below is a conversation between a human user and a helpful AI coding assistant.", - show_label=False, - ) - with gr.Row(): - with gr.Box(): - output = gr.Markdown() - chatbot = gr.Chatbot(elem_id="chat-message", label="Chat") - - with gr.Row(): - with gr.Column(scale=3): - user_message = gr.Textbox(placeholder="Enter your message here", show_label=False, elem_id="q-input") - with gr.Row(): - send_button = gr.Button("Send", elem_id="send-btn", visible=True) - - # regenerate_button = gr.Button("Regenerate", elem_id="send-btn", visible=True) - - clear_chat_button = gr.Button("Clear chat", elem_id="clear-btn", visible=True) - - with gr.Accordion(label="Parameters", open=False, elem_id="parameters-accordion"): - temperature = gr.Slider( - label="Temperature", - value=0.2, - minimum=0.0, - maximum=1.0, - step=0.1, - interactive=True, - info="Higher values produce more diverse outputs", - ) - top_k = gr.Slider( - label="Top-k", - value=50, - minimum=0.0, - maximum=100, - step=1, - interactive=True, - info="Sample from a shortlist of top-k tokens", - ) - top_p = gr.Slider( - label="Top-p (nucleus sampling)", - value=0.95, - minimum=0.0, - maximum=1, - step=0.05, - interactive=True, - info="Higher values sample more low-probability tokens", - ) - max_new_tokens = gr.Slider( - label="Max new tokens", - value=512, - minimum=0, - maximum=512, - step=4, - interactive=True, - info="The maximum numbers of new tokens", - ) - repetition_penalty = gr.Slider( - label="Repetition Penalty", - value=1.2, - minimum=0.0, - maximum=10, - step=0.1, - interactive=True, - info="The parameter for repetition penalty. 
1.0 means no penalty.", - ) - # with gr.Group(elem_id="share-btn-container"): - # community_icon = gr.HTML(community_icon_html, visible=True) - # loading_icon = gr.HTML(loading_icon_html, visible=True) - # share_button = gr.Button("Share to community", elem_id="share-btn", visible=True) - with gr.Row(): - gr.Examples( - examples=examples, - inputs=[user_message], - cache_examples=False, - fn=process_example, - outputs=[output], - ) - - history = gr.State([]) - # To clear out "message" input textbox and use this to regenerate message - last_user_message = gr.State("") - - user_message.submit( - generate, - inputs=[ - system_message, - user_message, - chatbot, - history, - temperature, - top_k, - top_p, - max_new_tokens, - repetition_penalty, - do_save, - ], - outputs=[chatbot, history, last_user_message, user_message], - ) - - send_button.click( - generate, - inputs=[ - system_message, - user_message, - chatbot, - history, - temperature, - top_k, - top_p, - max_new_tokens, - repetition_penalty, - do_save, - ], - outputs=[chatbot, history, last_user_message, user_message], - ) - - clear_chat_button.click(clear_chat, outputs=[chatbot, history]) - # share_button.click(None, [], [], _js=share_js) - -demo.queue(concurrency_count=16).launch(debug=True) diff --git a/spaces/sarinam/speaker-anonymization/IMSToucan/Preprocessing/ProsodicConditionExtractor.py b/spaces/sarinam/speaker-anonymization/IMSToucan/Preprocessing/ProsodicConditionExtractor.py deleted file mode 100644 index e1456363449c6c0dae0a56bde1a5d746e8b129a6..0000000000000000000000000000000000000000 --- a/spaces/sarinam/speaker-anonymization/IMSToucan/Preprocessing/ProsodicConditionExtractor.py +++ /dev/null @@ -1,40 +0,0 @@ -import soundfile as sf -import torch -import torch.multiprocessing -import torch.multiprocessing -from numpy import trim_zeros -from speechbrain.pretrained import EncoderClassifier - -from .AudioPreprocessor import AudioPreprocessor - - -class ProsodicConditionExtractor: - - def __init__(self, sr, device=torch.device("cpu")): - self.ap = AudioPreprocessor(input_sr=sr, output_sr=16000, melspec_buckets=80, hop_length=256, n_fft=1024, cut_silence=False) - # https://huggingface.co/speechbrain/spkrec-ecapa-voxceleb - self.speaker_embedding_func_ecapa = EncoderClassifier.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb", - run_opts={"device": str(device)}, - savedir="Models/SpeakerEmbedding/speechbrain_speaker_embedding_ecapa") - # https://huggingface.co/speechbrain/spkrec-xvect-voxceleb - self.speaker_embedding_func_xvector = EncoderClassifier.from_hparams(source="speechbrain/spkrec-xvect-voxceleb", - run_opts={"device": str(device)}, - savedir="Models/SpeakerEmbedding/speechbrain_speaker_embedding_xvector") - - def extract_condition_from_reference_wave(self, wave, already_normalized=False): - if already_normalized: - norm_wave = wave - else: - norm_wave = self.ap.audio_to_wave_tensor(normalize=True, audio=wave) - norm_wave = torch.tensor(trim_zeros(norm_wave.numpy())) - spk_emb_ecapa = self.speaker_embedding_func_ecapa.encode_batch(wavs=norm_wave.unsqueeze(0)).squeeze() - spk_emb_xvector = self.speaker_embedding_func_xvector.encode_batch(wavs=norm_wave.unsqueeze(0)).squeeze() - combined_utt_condition = torch.cat([spk_emb_ecapa.cpu(), - spk_emb_xvector.cpu()], dim=0) - return combined_utt_condition - - -if __name__ == '__main__': - wave, sr = sf.read("../audios/1.wav") - ext = ProsodicConditionExtractor(sr=sr) - print(ext.extract_condition_from_reference_wave(wave=wave).shape) diff --git 
a/spaces/scedlatioru/img-to-music/example/Adobe Photoshop Cc 2015 Crack Amtlib.dll Download NEW.md b/spaces/scedlatioru/img-to-music/example/Adobe Photoshop Cc 2015 Crack Amtlib.dll Download NEW.md deleted file mode 100644 index 95cb36de4662f31f44b5fe5f647c6f6330e614da..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Adobe Photoshop Cc 2015 Crack Amtlib.dll Download NEW.md +++ /dev/null @@ -1,82 +0,0 @@ - -

        Adobe Photoshop CC 2015 Crack Amtlib.dll Download: How to Activate Photoshop for Free

        - -

        Adobe Photoshop CC 2015 is one of the most popular and powerful photo editing software in the world. It allows you to create and enhance images, graphics, and designs with a variety of tools and features. However, Adobe Photoshop CC 2015 is not a free software, and you need to pay a monthly or yearly subscription fee to use it. If you don't want to pay for Photoshop, you might be looking for a way to crack it and use it for free.

        - -

        One of the most common methods to crack Photoshop is to use a file called amtlib.dll, which is a DLL file that is responsible for the licensing and activation of Adobe products. By replacing the original amtlib.dll file with a cracked one, you can bypass the activation process and use Photoshop without any restrictions. In this article, we will show you how to download and install the Adobe Photoshop CC 2015 crack amtlib.dll file, and how to use it to activate Photoshop for free.

        -

        adobe photoshop cc 2015 crack amtlib.dll download


        Download 🔗 https://gohhs.com/2uEAzW



        - -

        How to Download Adobe Photoshop CC 2015 Crack Amtlib.dll File

        - -

        The first step to crack Photoshop is to download the cracked amtlib.dll file that matches your system architecture (32-bit or 64-bit). There are many websites that offer the download of the amtlib.dll file, but you need to be careful because some of them might contain viruses or malware that can harm your computer. You also need to make sure that you download the correct version of the file that corresponds to the version of Photoshop that you have installed.

        - -

        One of the websites that you can use to download the Adobe Photoshop CC 2015 crack amtlib.dll file is DLL-files.com, which is a reliable and safe website that provides various DLL files for free. Here are the steps to download the file from this website:

        - -
          -
        1. Go to https://www.dll-files.com/amtlib.dll.html and scroll down to see the list of available versions of the file.
        2. -
        3. Choose the version that matches your system architecture (32-bit or 64-bit) and your Photoshop version (10.0.0.274 for 32-bit or 10.0.0.221 for 64-bit).
        4. -
        5. Click on the "Download" button next to the chosen version and wait for the download to finish.
        6. -
        7. Extract the ZIP file that contains the amtlib.dll file and save it in a safe location on your computer.
        8. -
        - -

        You can also use other websites that offer the download of the Adobe Photoshop CC 2015 crack amtlib.dll file, such as Google Drive or Archive.org, but you need to make sure that they are trustworthy and secure before downloading anything from them.

        - -

        How to Install Adobe Photoshop CC 2015 Crack Amtlib.dll File

        - -

        The next step to crack Photoshop is to install the cracked amtlib.dll file that you have downloaded in the previous step. To do this, you need to replace the original amtlib.dll file that is located in the installation folder of Photoshop with the cracked one. Here are the steps to install the Adobe Photoshop CC 2015 crack amtlib.dll file:

        - -
          -
        1. Locate the installation folder of Photoshop on your computer. The default location is C:\Program Files\Adobe\Adobe Photoshop CC 2015 for 32-bit systems or C:\Program Files (x86)\Adobe\Adobe Photoshop CC 2015 for 64-bit systems.
        2. -
        3. Find and rename the original amtlib.dll file that is in the installation folder. You can rename it to something like amtlib.dll.bak or amtlib.dll.old, so that you can restore it later if needed.
        4. -
        5. Copy and paste the cracked amtlib.dll file that you have downloaded and extracted in the previous step into the installation folder of Photoshop.
        6. -
        7. Run Photoshop as an administrator and enjoy using it for free.
        8. -
        - -

        You have successfully installed the Adobe Photoshop CC 2015 crack amtlib.dll file and activated Photoshop for free. You can now use all the features and tools of Photoshop without any limitations or restrictions.

        - -

        Conclusion

        - -

        In this article, we have shown you how to download and install the Adobe Photoshop CC 2015 crack amtlib.dll file, and how to use it to activate Photoshop for free. This method is one of the most common and easy ways to crack Photoshop, but it also has some risks and disadvantages. For example, using a cracked software might violate the terms and conditions of Adobe, and expose you to legal issues or penalties. Moreover, using a cracked software might compromise your computer security and performance, as it might contain viruses or malware that can damage your system or steal your data. Furthermore, using a cracked software might prevent you from receiving updates or support from Adobe, which might affect your user experience or cause compatibility issues with other software or devices.

        - -

        Therefore, we do not recommend or endorse using this method or any other method to crack Photoshop or any other software. We provide this information for educational purposes only, and we are not responsible for any consequences that might arise from using it. If you want to use Photoshop legally and safely, we suggest you buy a subscription from Adobe or use alternative software that is free or cheaper than Photoshop.

        -

        -

        Meta description: Learn how to download and install the Adobe Photoshop CC 2015 crack amtlib.dll file, and how to use it to activate Photoshop for free. But be aware of the risks and disadvantages of using a cracked software.

        - -- Generate a cover image for the article: A cover image is a visual representation of the article that appears at the top of the page or in the social media posts. It should include the title of the article and some relevant images that capture the attention of the readers. A cover image should be clear, attractive and consistent with the theme of the article. Here is an example of a possible cover image for the article: - -

        Cover image: Cover image for Adobe Photoshop CC 2015 crack amtlib.dll download

        - -- Proofread the content: Proofreading is the process of checking and correcting any errors in spelling, grammar, punctuation or syntax in the content. Proofreading can help improve the readability and credibility of the content, and avoid any misunderstandings or confusions for the readers. You can use online tools such as Grammarly or Hemingway to help you proofread your content, or you can ask someone else to read your content and give you feedback. - -- Add some relevant links or images: Adding some relevant links or images can help enrich your content and provide more information or resources for your readers. You can add links to other articles that are related to your topic, or to sources that support your claims or facts. You can also add images that illustrate your points or show examples of what you are talking about. You can use online tools such as Unsplash or Pixabay to find free images that you can use for your content. However, you should always make sure that you have permission to use any link or image that you add, and that you give proper credit to their owners or creators. You should also avoid adding too many links or images that might distract your readers from your main message or make your content look cluttered or spammy. -



        - -

        If you have tried this method or have any questions or comments about it, feel free to share them with us in the comment section below. We would love to hear from you and help you with any issues that you might have. Thank you for reading our article and we hope you have enjoyed it!

        -


        3cee63e6c2
        -
        -
        \ No newline at end of file diff --git a/spaces/scedlatioru/img-to-music/example/Omron Sysmac Cpm1a Software Free 27l _BEST_.md b/spaces/scedlatioru/img-to-music/example/Omron Sysmac Cpm1a Software Free 27l _BEST_.md deleted file mode 100644 index 258aded2cd7033f4f17ca046a2e7af703347f8c6..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Omron Sysmac Cpm1a Software Free 27l _BEST_.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Omron Sysmac Cpm1a Software Free 27l


        Download Zip ->>> https://gohhs.com/2uEAuG



        - -OMRON, 2009. All rights reserved. No part of this publication may be reproduced, stored in a retrieval system or transmitted in any form or by any means (electronic, mechanical, photocopying, recording or otherwise) without the prior written permission of the copyright holder. Energia UWC LLC. Russia, 190000, St. Petersburg, st. Dekabristov, 7, office 419. Tel.: +7 (812) 329-11-06. E-mail: sales@energyok.ru www.energyok.com Energia UWC LLC. Russia, 190000, St. Petersburg, st. Dekabristov, 7, office 419. Tel.: +7 (812) 329-11-06. 8a78ff9644
        -
        -
        -

        diff --git a/spaces/scedlatioru/img-to-music/example/Qanoon E Mubashrat In Urdu Pdf Download LINK.md b/spaces/scedlatioru/img-to-music/example/Qanoon E Mubashrat In Urdu Pdf Download LINK.md deleted file mode 100644 index ec67ae34c33a4a27feaa09609d2fb8cbc124b4de..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/example/Qanoon E Mubashrat In Urdu Pdf Download LINK.md +++ /dev/null @@ -1,8 +0,0 @@ -
        -

        Kaster Womens Bib Short
        ScreenSaver 2015 Free Download [Serial Code]
        qwertyuiop DVR 4.9.1 Full Free + Crack Activation Key
        Samsung phone driver package
        download RamPro Office 2010 Serial Key With Keygen
        torrent for iphone 5 4s driver

        -


        How to download the e-book "Stellar" In Swedish
        Waking the Sleeping Dragon The Unveiling Game (excerpt)
        Big Bumpers 4.0.0 64-BIT FULL R2R3R3R3R
        Warcraft III DVD Ripper 0.5.4
        The publisher of the only one who, for all those interested in the Laozi constitution of human rights, has in the past often been able to prove himself a scholar, teacher, minister and politician. Jehruyyaan naye kaam karen aap ke hindi ko ek gun aap k
        Fantasy Roleplay Licence N Tricesearche 1.0.0 Serial Keyl
        Waking the Sleeping Dragon The Unveiling Game (excerpt)
        Roll the Tide: How to Roll the Tide in Your Life and Business (Japanese Edition)
        Landskronik Av Människor och Aristokrati (1934) Fulll
        Big Bumpers 4.0.

        -

        Qanoon E Mubashrat In Urdu Pdf Download


        DOWNLOAD >>> https://gohhs.com/2uEzbp



        -

        iOS 9.1.2 For iPhone 5s/5c/5/4s Plus/4 Download
        iitb sis engineering biology project third semertum
        video de asunción de mons v
        get free premium skins for heroes clash of clan emiliaan free download
        heartless eng land battle of wesnoth new patches
        tik kapak, tik kapak 2016, tik kapak 2015
        song box yahoo music, songbox gg music, songbox music
        dainis narkis liudaiteellus

        -

        Azhar Hafeez Ud Durood
        Kismet 2.2 Free Download Full Version Kismet 2.2 Free Download
        love and life or leo to be like
        kol nista l htej geel aayegi
        The Order of the Phoenix: Complete Trilogy[DVD]
        kuai yong lua

        899543212b
        -
        -
        \ No newline at end of file diff --git a/spaces/sczhou/CodeFormer/CodeFormer/basicsr/ops/dcn/__init__.py b/spaces/sczhou/CodeFormer/CodeFormer/basicsr/ops/dcn/__init__.py deleted file mode 100644 index 32e3592f896d61b4127e09d0476381b9d55e32ff..0000000000000000000000000000000000000000 --- a/spaces/sczhou/CodeFormer/CodeFormer/basicsr/ops/dcn/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .deform_conv import (DeformConv, DeformConvPack, ModulatedDeformConv, ModulatedDeformConvPack, deform_conv, - modulated_deform_conv) - -__all__ = [ - 'DeformConv', 'DeformConvPack', 'ModulatedDeformConv', 'ModulatedDeformConvPack', 'deform_conv', - 'modulated_deform_conv' -] diff --git a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transformer/lightconv.py b/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transformer/lightconv.py deleted file mode 100644 index a940c6d9042563185e5a673f42137bcff7fa8d18..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/transformer/lightconv.py +++ /dev/null @@ -1,112 +0,0 @@ -"""Lightweight Convolution Module.""" - -import numpy -import torch -from torch import nn -import torch.nn.functional as F - - -MIN_VALUE = float(numpy.finfo(numpy.float32).min) - - -class LightweightConvolution(nn.Module): - """Lightweight Convolution layer. - - This implementation is based on - https://github.com/pytorch/fairseq/tree/master/fairseq - - Args: - wshare (int): the number of kernel of convolution - n_feat (int): the number of features - dropout_rate (float): dropout_rate - kernel_size (int): kernel size (length) - use_kernel_mask (bool): Use causal mask or not for convolution kernel - use_bias (bool): Use bias term or not. - - """ - - def __init__( - self, - wshare, - n_feat, - dropout_rate, - kernel_size, - use_kernel_mask=False, - use_bias=False, - ): - """Construct Lightweight Convolution layer.""" - super(LightweightConvolution, self).__init__() - - assert n_feat % wshare == 0 - self.wshare = wshare - self.use_kernel_mask = use_kernel_mask - self.dropout_rate = dropout_rate - self.kernel_size = kernel_size - self.padding_size = int(kernel_size / 2) - - # linear -> GLU -> lightconv -> linear - self.linear1 = nn.Linear(n_feat, n_feat * 2) - self.linear2 = nn.Linear(n_feat, n_feat) - self.act = nn.GLU() - - # lightconv related - self.weight = nn.Parameter( - torch.Tensor(self.wshare, 1, kernel_size).uniform_(0, 1) - ) - self.use_bias = use_bias - if self.use_bias: - self.bias = nn.Parameter(torch.Tensor(n_feat)) - - # mask of kernel - kernel_mask0 = torch.zeros(self.wshare, int(kernel_size / 2)) - kernel_mask1 = torch.ones(self.wshare, int(kernel_size / 2 + 1)) - self.kernel_mask = torch.cat((kernel_mask1, kernel_mask0), dim=-1).unsqueeze(1) - - def forward(self, query, key, value, mask): - """Forward of 'Lightweight Convolution'. - - This function takes query, key and value but uses only query. 
- This is just for compatibility with self-attention layer (attention.py) - - Args: - query (torch.Tensor): (batch, time1, d_model) input tensor - key (torch.Tensor): (batch, time2, d_model) NOT USED - value (torch.Tensor): (batch, time2, d_model) NOT USED - mask (torch.Tensor): (batch, time1, time2) mask - - Return: - x (torch.Tensor): (batch, time1, d_model) ouput - - """ - # linear -> GLU -> lightconv -> linear - x = query - B, T, C = x.size() - H = self.wshare - - # first liner layer - x = self.linear1(x) - - # GLU activation - x = self.act(x) - - # lightconv - x = x.transpose(1, 2).contiguous().view(-1, H, T) # B x C x T - weight = F.dropout(self.weight, self.dropout_rate, training=self.training) - if self.use_kernel_mask: - self.kernel_mask = self.kernel_mask.to(x.device) - weight = weight.masked_fill(self.kernel_mask == 0.0, float("-inf")) - weight = F.softmax(weight, dim=-1) - x = F.conv1d(x, weight, padding=self.padding_size, groups=self.wshare).view( - B, C, T - ) - if self.use_bias: - x = x + self.bias.view(1, -1, 1) - x = x.transpose(1, 2) # B x T x C - - if mask is not None and not self.use_kernel_mask: - mask = mask.transpose(-1, -2) - x = x.masked_fill(mask == 0, 0.0) - - # second linear layer - x = self.linear2(x) - return x diff --git a/spaces/segments-tobias/conex/espnet2/tasks/abs_task.py b/spaces/segments-tobias/conex/espnet2/tasks/abs_task.py deleted file mode 100644 index a836633a6bbb929f31f63cbcff8c52ee0b2214f1..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet2/tasks/abs_task.py +++ /dev/null @@ -1,1782 +0,0 @@ -from abc import ABC -from abc import abstractmethod -import argparse -from dataclasses import dataclass -from distutils.version import LooseVersion -import functools -import logging -import os -from pathlib import Path -import sys -from typing import Any -from typing import Callable -from typing import Dict -from typing import List -from typing import Optional -from typing import Sequence -from typing import Tuple -from typing import Union - -import humanfriendly -import numpy as np -import torch -import torch.multiprocessing -import torch.nn -import torch.optim -from torch.utils.data import DataLoader -from typeguard import check_argument_types -from typeguard import check_return_type -import wandb -import yaml - -from espnet import __version__ -from espnet.utils.cli_utils import get_commandline_args -from espnet2.iterators.abs_iter_factory import AbsIterFactory -from espnet2.iterators.chunk_iter_factory import ChunkIterFactory -from espnet2.iterators.multiple_iter_factory import MultipleIterFactory -from espnet2.iterators.sequence_iter_factory import SequenceIterFactory -from espnet2.main_funcs.collect_stats import collect_stats -from espnet2.optimizers.sgd import SGD -from espnet2.samplers.build_batch_sampler import BATCH_TYPES -from espnet2.samplers.build_batch_sampler import build_batch_sampler -from espnet2.samplers.unsorted_batch_sampler import UnsortedBatchSampler -from espnet2.schedulers.noam_lr import NoamLR -from espnet2.schedulers.warmup_lr import WarmupLR -from espnet2.torch_utils.load_pretrained_model import load_pretrained_model -from espnet2.torch_utils.model_summary import model_summary -from espnet2.torch_utils.pytorch_version import pytorch_cudnn_version -from espnet2.torch_utils.set_all_random_seed import set_all_random_seed -from espnet2.train.abs_espnet_model import AbsESPnetModel -from espnet2.train.class_choices import ClassChoices -from espnet2.train.dataset import AbsDataset -from 
espnet2.train.dataset import DATA_TYPES -from espnet2.train.dataset import ESPnetDataset -from espnet2.train.distributed_utils import DistributedOption -from espnet2.train.distributed_utils import free_port -from espnet2.train.distributed_utils import get_master_port -from espnet2.train.distributed_utils import get_node_rank -from espnet2.train.distributed_utils import get_num_nodes -from espnet2.train.distributed_utils import resolve_distributed_mode -from espnet2.train.iterable_dataset import IterableESPnetDataset -from espnet2.train.trainer import Trainer -from espnet2.utils.build_dataclass import build_dataclass -from espnet2.utils import config_argparse -from espnet2.utils.get_default_kwargs import get_default_kwargs -from espnet2.utils.nested_dict_action import NestedDictAction -from espnet2.utils.types import humanfriendly_parse_size_or_none -from espnet2.utils.types import int_or_none -from espnet2.utils.types import str2bool -from espnet2.utils.types import str2triple_str -from espnet2.utils.types import str_or_int -from espnet2.utils.types import str_or_none -from espnet2.utils.yaml_no_alias_safe_dump import yaml_no_alias_safe_dump - -if LooseVersion(torch.__version__) >= LooseVersion("1.5.0"): - from torch.multiprocessing.spawn import ProcessContext -else: - from torch.multiprocessing.spawn import SpawnContext as ProcessContext - - -optim_classes = dict( - adam=torch.optim.Adam, - sgd=SGD, - adadelta=torch.optim.Adadelta, - adagrad=torch.optim.Adagrad, - adamax=torch.optim.Adamax, - asgd=torch.optim.ASGD, - lbfgs=torch.optim.LBFGS, - rmsprop=torch.optim.RMSprop, - rprop=torch.optim.Rprop, -) -if LooseVersion(torch.__version__) >= LooseVersion("1.2.0"): - optim_classes["adamw"] = torch.optim.AdamW -try: - import torch_optimizer - - optim_classes.update( - accagd=torch_optimizer.AccSGD, - adabound=torch_optimizer.AdaBound, - adamod=torch_optimizer.AdaMod, - diffgrad=torch_optimizer.DiffGrad, - lamb=torch_optimizer.Lamb, - novograd=torch_optimizer.NovoGrad, - pid=torch_optimizer.PID, - # torch_optimizer<=0.0.1a10 doesn't support - # qhadam=torch_optimizer.QHAdam, - qhm=torch_optimizer.QHM, - radam=torch_optimizer.RAdam, - sgdw=torch_optimizer.SGDW, - yogi=torch_optimizer.Yogi, - ) - del torch_optimizer -except ImportError: - pass -try: - import apex - - optim_classes.update( - fusedadam=apex.optimizers.FusedAdam, - fusedlamb=apex.optimizers.FusedLAMB, - fusednovograd=apex.optimizers.FusedNovoGrad, - fusedsgd=apex.optimizers.FusedSGD, - ) - del apex -except ImportError: - pass -try: - import fairscale -except ImportError: - fairscale = None - - -scheduler_classes = dict( - ReduceLROnPlateau=torch.optim.lr_scheduler.ReduceLROnPlateau, - lambdalr=torch.optim.lr_scheduler.LambdaLR, - steplr=torch.optim.lr_scheduler.StepLR, - multisteplr=torch.optim.lr_scheduler.MultiStepLR, - exponentiallr=torch.optim.lr_scheduler.ExponentialLR, - CosineAnnealingLR=torch.optim.lr_scheduler.CosineAnnealingLR, -) -if LooseVersion(torch.__version__) >= LooseVersion("1.1.0"): - scheduler_classes.update( - noamlr=NoamLR, - warmuplr=WarmupLR, - ) -if LooseVersion(torch.__version__) >= LooseVersion("1.3.0"): - CosineAnnealingWarmRestarts = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts - scheduler_classes.update( - cycliclr=torch.optim.lr_scheduler.CyclicLR, - onecyclelr=torch.optim.lr_scheduler.OneCycleLR, - CosineAnnealingWarmRestarts=CosineAnnealingWarmRestarts, - ) -# To lower keys -optim_classes = {k.lower(): v for k, v in optim_classes.items()} -scheduler_classes = {k.lower(): v for k, v in 
scheduler_classes.items()} - - -@dataclass -class IteratorOptions: - preprocess_fn: callable - collate_fn: callable - data_path_and_name_and_type: list - shape_files: list - batch_size: int - batch_bins: int - batch_type: str - max_cache_size: float - max_cache_fd: int - distributed: bool - num_batches: Optional[int] - num_iters_per_epoch: Optional[int] - train: bool - - -class AbsTask(ABC): - # Use @staticmethod, or @classmethod, - # instead of instance method to avoid God classes - - # If you need more than one optimizers, change this value in inheritance - num_optimizers: int = 1 - trainer = Trainer - class_choices_list: List[ClassChoices] = [] - - def __init__(self): - raise RuntimeError("This class can't be instantiated.") - - @classmethod - @abstractmethod - def add_task_arguments(cls, parser: argparse.ArgumentParser): - pass - - @classmethod - @abstractmethod - def build_collate_fn( - cls, args: argparse.Namespace, train: bool - ) -> Callable[[Sequence[Dict[str, np.ndarray]]], Dict[str, torch.Tensor]]: - """Return "collate_fn", which is a callable object and given to DataLoader. - - >>> from torch.utils.data import DataLoader - >>> loader = DataLoader(collate_fn=cls.build_collate_fn(args, train=True), ...) - - In many cases, you can use our common collate_fn. - """ - raise NotImplementedError - - @classmethod - @abstractmethod - def build_preprocess_fn( - cls, args: argparse.Namespace, train: bool - ) -> Optional[Callable[[str, Dict[str, np.array]], Dict[str, np.ndarray]]]: - raise NotImplementedError - - @classmethod - @abstractmethod - def required_data_names( - cls, train: bool = True, inference: bool = False - ) -> Tuple[str, ...]: - """Define the required names by Task - - This function is used by - >>> cls.check_task_requirements() - If your model is defined as following, - - >>> from espnet2.train.abs_espnet_model import AbsESPnetModel - >>> class Model(AbsESPnetModel): - ... def forward(self, input, output, opt=None): pass - - then "required_data_names" should be as - - >>> required_data_names = ('input', 'output') - """ - raise NotImplementedError - - @classmethod - @abstractmethod - def optional_data_names( - cls, train: bool = True, inference: bool = False - ) -> Tuple[str, ...]: - """Define the optional names by Task - - This function is used by - >>> cls.check_task_requirements() - If your model is defined as follows, - - >>> from espnet2.train.abs_espnet_model import AbsESPnetModel - >>> class Model(AbsESPnetModel): - ... def forward(self, input, output, opt=None): pass - - then "optional_data_names" should be as - - >>> optional_data_names = ('opt',) - """ - raise NotImplementedError - - @classmethod - @abstractmethod - def build_model(cls, args: argparse.Namespace) -> AbsESPnetModel: - raise NotImplementedError - - @classmethod - def get_parser(cls) -> config_argparse.ArgumentParser: - assert check_argument_types() - - class ArgumentDefaultsRawTextHelpFormatter( - argparse.RawTextHelpFormatter, - argparse.ArgumentDefaultsHelpFormatter, - ): - pass - - parser = config_argparse.ArgumentParser( - description="base parser", - formatter_class=ArgumentDefaultsRawTextHelpFormatter, - ) - - # NOTE(kamo): Use '_' instead of '-' to avoid confusion. - # I think '-' looks really confusing if it's written in yaml. - - # NOTE(kamo): add_arguments(..., required=True) can't be used - # to provide --print_config mode. 
Instead of it, do as - parser.set_defaults(required=["output_dir"]) - - group = parser.add_argument_group("Common configuration") - - group.add_argument( - "--print_config", - action="store_true", - help="Print the config file and exit", - ) - group.add_argument( - "--log_level", - type=lambda x: x.upper(), - default="INFO", - choices=("ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"), - help="The verbose level of logging", - ) - group.add_argument( - "--dry_run", - type=str2bool, - default=False, - help="Perform process without training", - ) - group.add_argument( - "--iterator_type", - type=str, - choices=["sequence", "chunk", "task", "none"], - default="sequence", - help="Specify iterator type", - ) - - group.add_argument("--output_dir", type=str_or_none, default=None) - group.add_argument( - "--ngpu", - type=int, - default=0, - help="The number of gpus. 0 indicates CPU mode", - ) - group.add_argument("--seed", type=int, default=0, help="Random seed") - group.add_argument( - "--num_workers", - type=int, - default=1, - help="The number of workers used for DataLoader", - ) - group.add_argument( - "--num_att_plot", - type=int, - default=3, - help="The number images to plot the outputs from attention. " - "This option makes sense only when attention-based model", - ) - - group = parser.add_argument_group("distributed training related") - group.add_argument( - "--dist_backend", - default="nccl", - type=str, - help="distributed backend", - ) - group.add_argument( - "--dist_init_method", - type=str, - default="env://", - help='if init_method="env://", env values of "MASTER_PORT", "MASTER_ADDR", ' - '"WORLD_SIZE", and "RANK" are referred.', - ) - group.add_argument( - "--dist_world_size", - default=None, - type=int_or_none, - help="number of nodes for distributed training", - ) - group.add_argument( - "--dist_rank", - type=int_or_none, - default=None, - help="node rank for distributed training", - ) - group.add_argument( - # Not starting with "dist_" for compatibility to launch.py - "--local_rank", - type=int_or_none, - default=None, - help="local rank for distributed training. This option is used if " - "--multiprocessing_distributed=false", - ) - group.add_argument( - "--dist_master_addr", - default=None, - type=str_or_none, - help="The master address for distributed training. " - "This value is used when dist_init_method == 'env://'", - ) - group.add_argument( - "--dist_master_port", - default=None, - type=int_or_none, - help="The master port for distributed training" - "This value is used when dist_init_method == 'env://'", - ) - group.add_argument( - "--dist_launcher", - default=None, - type=str_or_none, - choices=["slurm", "mpi", None], - help="The launcher type for distributed training", - ) - group.add_argument( - "--multiprocessing_distributed", - default=False, - type=str2bool, - help="Use multi-processing distributed training to launch " - "N processes per node, which has N GPUs. 
This is the " - "fastest way to use PyTorch for either single node or " - "multi node data parallel training", - ) - group.add_argument( - "--unused_parameters", - type=str2bool, - default=False, - help="Whether to use the find_unused_parameters in " - "torch.nn.parallel.DistributedDataParallel ", - ) - group.add_argument( - "--sharded_ddp", - default=False, - type=str2bool, - help="Enable sharded training provided by fairscale", - ) - - group = parser.add_argument_group("cudnn mode related") - group.add_argument( - "--cudnn_enabled", - type=str2bool, - default=torch.backends.cudnn.enabled, - help="Enable CUDNN", - ) - group.add_argument( - "--cudnn_benchmark", - type=str2bool, - default=torch.backends.cudnn.benchmark, - help="Enable cudnn-benchmark mode", - ) - group.add_argument( - "--cudnn_deterministic", - type=str2bool, - default=True, - help="Enable cudnn-deterministic mode", - ) - - group = parser.add_argument_group("collect stats mode related") - group.add_argument( - "--collect_stats", - type=str2bool, - default=False, - help='Perform on "collect stats" mode', - ) - group.add_argument( - "--write_collected_feats", - type=str2bool, - default=False, - help='Write the output features from the model when "collect stats" mode', - ) - - group = parser.add_argument_group("Trainer related") - group.add_argument( - "--max_epoch", - type=int, - default=40, - help="The maximum number epoch to train", - ) - group.add_argument( - "--patience", - type=int_or_none, - default=None, - help="Number of epochs to wait without improvement " - "before stopping the training", - ) - group.add_argument( - "--val_scheduler_criterion", - type=str, - nargs=2, - default=("valid", "loss"), - help="The criterion used for the value given to the lr scheduler. " - 'Give a pair referring the phase, "train" or "valid",' - 'and the criterion name. The mode specifying "min" or "max" can ' - "be changed by --scheduler_conf", - ) - group.add_argument( - "--early_stopping_criterion", - type=str, - nargs=3, - default=("valid", "loss", "min"), - help="The criterion used for judging of early stopping. " - 'Give a pair referring the phase, "train" or "valid",' - 'the criterion name and the mode, "min" or "max", e.g. "acc,max".', - ) - group.add_argument( - "--best_model_criterion", - type=str2triple_str, - nargs="+", - default=[ - ("train", "loss", "min"), - ("valid", "loss", "min"), - ("train", "acc", "max"), - ("valid", "acc", "max"), - ], - help="The criterion used for judging of the best model. " - 'Give a pair referring the phase, "train" or "valid",' - 'the criterion name, and the mode, "min" or "max", e.g. "acc,max".', - ) - group.add_argument( - "--keep_nbest_models", - type=int, - nargs="+", - default=[10], - help="Remove previous snapshots excluding the n-best scored epochs", - ) - group.add_argument( - "--grad_clip", - type=float, - default=5.0, - help="Gradient norm threshold to clip", - ) - group.add_argument( - "--grad_clip_type", - type=float, - default=2.0, - help="The type of the used p-norm for gradient clip. 
Can be inf", - ) - group.add_argument( - "--grad_noise", - type=str2bool, - default=False, - help="The flag to switch to use noise injection to " - "gradients during training", - ) - group.add_argument( - "--accum_grad", - type=int, - default=1, - help="The number of gradient accumulation", - ) - group.add_argument( - "--no_forward_run", - type=str2bool, - default=False, - help="Just only iterating data loading without " - "model forwarding and training", - ) - group.add_argument( - "--resume", - type=str2bool, - default=False, - help="Enable resuming if checkpoint is existing", - ) - group.add_argument( - "--train_dtype", - default="float32", - choices=["float16", "float32", "float64"], - help="Data type for training.", - ) - group.add_argument( - "--use_amp", - type=str2bool, - default=False, - help="Enable Automatic Mixed Precision. This feature requires pytorch>=1.6", - ) - group.add_argument( - "--log_interval", - type=int_or_none, - default=None, - help="Show the logs every the number iterations in each epochs at the " - "training phase. If None is given, it is decided according the number " - "of training samples automatically .", - ) - group.add_argument( - "--use_tensorboard", - type=str2bool, - default=True, - help="Enable tensorboard logging", - ) - group.add_argument( - "--use_wandb", - type=str2bool, - default=False, - help="Enable wandb logging", - ) - group.add_argument( - "--wandb_project", - type=str, - default=None, - help="Specify wandb project", - ) - group.add_argument( - "--wandb_id", - type=str, - default=None, - help="Specify wandb id", - ) - group.add_argument( - "--detect_anomaly", - type=str2bool, - default=False, - help="Set torch.autograd.set_detect_anomaly", - ) - - group = parser.add_argument_group("Pretraining model related") - group.add_argument("--pretrain_path", help="This option is obsoleted") - group.add_argument( - "--init_param", - type=str, - default=[], - nargs="*", - help="Specify the file path used for initialization of parameters. " - "The format is ':::', " - "where file_path is the model file path, " - "src_key specifies the key of model states to be used in the model file, " - "dst_key specifies the attribute of the model to be initialized, " - "and exclude_keys excludes keys of model states for the initialization." - "e.g.\n" - " # Load all parameters" - " --init_param some/where/model.pth\n" - " # Load only decoder parameters" - " --init_param some/where/model.pth:decoder:decoder\n" - " # Load only decoder parameters excluding decoder.embed" - " --init_param some/where/model.pth:decoder:decoder:decoder.embed\n" - " --init_param some/where/model.pth:decoder:decoder:decoder.embed\n", - ) - group.add_argument( - "--freeze_param", - type=str, - default=[], - nargs="*", - help="Freeze parameters", - ) - - group = parser.add_argument_group("BatchSampler related") - group.add_argument( - "--num_iters_per_epoch", - type=int_or_none, - default=None, - help="Restrict the number of iterations for training per epoch", - ) - group.add_argument( - "--batch_size", - type=int, - default=20, - help="The mini-batch size used for training. Used if batch_type='unsorted'," - " 'sorted', or 'folded'.", - ) - group.add_argument( - "--valid_batch_size", - type=int_or_none, - default=None, - help="If not given, the value of --batch_size is used", - ) - group.add_argument( - "--batch_bins", - type=int, - default=1000000, - help="The number of batch bins. 
Used if batch_type='length' or 'numel'", - ) - group.add_argument( - "--valid_batch_bins", - type=int_or_none, - default=None, - help="If not given, the value of --batch_bins is used", - ) - - group.add_argument("--train_shape_file", type=str, action="append", default=[]) - group.add_argument("--valid_shape_file", type=str, action="append", default=[]) - - group = parser.add_argument_group("Sequence iterator related") - _batch_type_help = "" - for key, value in BATCH_TYPES.items(): - _batch_type_help += f'"{key}":\n{value}\n' - group.add_argument( - "--batch_type", - type=str, - default="folded", - choices=list(BATCH_TYPES), - help=_batch_type_help, - ) - group.add_argument( - "--valid_batch_type", - type=str_or_none, - default=None, - choices=list(BATCH_TYPES) + [None], - help="If not given, the value of --batch_type is used", - ) - group.add_argument("--fold_length", type=int, action="append", default=[]) - group.add_argument( - "--sort_in_batch", - type=str, - default="descending", - choices=["descending", "ascending"], - help="Sort the samples in each mini-batch by the sample " - 'lengths. To enable this, "shape_file" must have the length information.', - ) - group.add_argument( - "--sort_batch", - type=str, - default="descending", - choices=["descending", "ascending"], - help="Sort mini-batches by the sample lengths", - ) - group.add_argument( - "--multiple_iterator", - type=str2bool, - default=False, - help="Use multiple iterator mode", - ) - - group = parser.add_argument_group("Chunk iterator related") - group.add_argument( - "--chunk_length", - type=str_or_int, - default=500, - help="Specify chunk length. e.g. '300', '300,400,500', or '300-400'. " - "If multiple numbers separated by comma are given, " - "one of them is selected randomly for each sample. " - "If two numbers are given with '-', it indicates the range of the choices. " - "Note that if the sequence length is shorter than all of the chunk_lengths, " - "the sample is discarded. ", - ) - group.add_argument( - "--chunk_shift_ratio", - type=float, - default=0.5, - help="Specify the shift width of chunks. If it's less than 1, " - "overlapping is allowed, and if it's bigger than 1, there are gaps " - "between the chunks.", - ) - group.add_argument( - "--num_cache_chunks", - type=int, - default=1024, - help="Shuffle in the specified number of chunks and generate mini-batches. " - "The larger this value, the more randomness can be obtained.", - ) - - group = parser.add_argument_group("Dataset related") - _data_path_and_name_and_type_help = ( - "Give three words split by comma. It's used for the training data. " - "e.g. '--train_data_path_and_name_and_type some/path/a.scp,foo,sound'. " - "The first value, some/path/a.scp, indicates the file path, " - "and the second, foo, is the key name used for the mini-batch data, " - "and the last, sound, decides the file type. " - "This option is repeatable, so you can input any number of features " - "for your task. 
Supported file types are as follows:\n\n" - ) - for key, dic in DATA_TYPES.items(): - _data_path_and_name_and_type_help += f'"{key}":\n{dic["help"]}\n\n' - - group.add_argument( - "--train_data_path_and_name_and_type", - type=str2triple_str, - action="append", - default=[], - help=_data_path_and_name_and_type_help, - ) - group.add_argument( - "--valid_data_path_and_name_and_type", - type=str2triple_str, - action="append", - default=[], - ) - group.add_argument( - "--allow_variable_data_keys", - type=str2bool, - default=False, - help="Allow the arbitrary keys for mini-batch with ignoring " - "the task requirements", - ) - group.add_argument( - "--max_cache_size", - type=humanfriendly.parse_size, - default=0.0, - help="The maximum cache size for data loader. e.g. 10MB, 20GB.", - ) - group.add_argument( - "--max_cache_fd", - type=int, - default=32, - help="The maximum number of file descriptors to be kept " - "as opened for ark files. " - "This feature is only valid when data type is 'kaldi_ark'.", - ) - group.add_argument( - "--valid_max_cache_size", - type=humanfriendly_parse_size_or_none, - default=None, - help="The maximum cache size for validation data loader. e.g. 10MB, 20GB. " - "If None, the 5 percent size of --max_cache_size", - ) - - group = parser.add_argument_group("Optimizer related") - for i in range(1, cls.num_optimizers + 1): - suf = "" if i == 1 else str(i) - group.add_argument( - f"--optim{suf}", - type=lambda x: x.lower(), - default="adadelta", - choices=list(optim_classes), - help="The optimizer type", - ) - group.add_argument( - f"--optim{suf}_conf", - action=NestedDictAction, - default=dict(), - help="The keyword arguments for optimizer", - ) - group.add_argument( - f"--scheduler{suf}", - type=lambda x: str_or_none(x.lower()), - default=None, - choices=list(scheduler_classes) + [None], - help="The lr scheduler type", - ) - group.add_argument( - f"--scheduler{suf}_conf", - action=NestedDictAction, - default=dict(), - help="The keyword arguments for lr scheduler", - ) - - cls.trainer.add_arguments(parser) - cls.add_task_arguments(parser) - - assert check_return_type(parser) - return parser - - @classmethod - def build_optimizers( - cls, - args: argparse.Namespace, - model: torch.nn.Module, - ) -> List[torch.optim.Optimizer]: - if cls.num_optimizers != 1: - raise RuntimeError( - "build_optimizers() must be overridden if num_optimizers != 1" - ) - - optim_class = optim_classes.get(args.optim) - if optim_class is None: - raise ValueError(f"must be one of {list(optim_classes)}: {args.optim}") - if args.sharded_ddp: - if fairscale is None: - raise RuntimeError("Requiring fairscale. Do 'pip install fairscale'") - optim = fairscale.optim.oss.OSS( - params=model.parameters(), optim=optim_class, **args.optim_conf - ) - else: - optim = optim_class(model.parameters(), **args.optim_conf) - - optimizers = [optim] - return optimizers - - @classmethod - def exclude_opts(cls) -> Tuple[str, ...]: - """The options not to be shown by --print_config""" - return "required", "print_config", "config", "ngpu" - - @classmethod - def get_default_config(cls) -> Dict[str, Any]: - """Return the configuration as dict. 
- - This method is used by print_config() - """ - - def get_class_type(name: str, classes: dict): - _cls = classes.get(name) - if _cls is None: - raise ValueError(f"must be one of {list(classes)}: {name}") - return _cls - - # This method is used only for --print_config - assert check_argument_types() - parser = cls.get_parser() - args, _ = parser.parse_known_args() - config = vars(args) - # Excludes the options not to be shown - for k in AbsTask.exclude_opts(): - config.pop(k) - - for i in range(1, cls.num_optimizers + 1): - suf = "" if i == 1 else str(i) - name = config[f"optim{suf}"] - optim_class = get_class_type(name, optim_classes) - conf = get_default_kwargs(optim_class) - # Overwrite the default by the arguments, - conf.update(config[f"optim{suf}_conf"]) - # and set it again - config[f"optim{suf}_conf"] = conf - - name = config[f"scheduler{suf}"] - if name is not None: - scheduler_class = get_class_type(name, scheduler_classes) - conf = get_default_kwargs(scheduler_class) - # Overwrite the default by the arguments, - conf.update(config[f"scheduler{suf}_conf"]) - # and set it again - config[f"scheduler{suf}_conf"] = conf - - for class_choices in cls.class_choices_list: - if getattr(args, class_choices.name) is not None: - class_obj = class_choices.get_class(getattr(args, class_choices.name)) - conf = get_default_kwargs(class_obj) - name = class_choices.name - # Overwrite the default by the arguments, - conf.update(config[f"{name}_conf"]) - # and set it again - config[f"{name}_conf"] = conf - return config - - @classmethod - def check_required_command_args(cls, args: argparse.Namespace): - assert check_argument_types() - for k in vars(args): - if "-" in k: - raise RuntimeError(f'Use "_" instead of "-": parser.get_parser("{k}")') - - required = ", ".join( - f"--{a}" for a in args.required if getattr(args, a) is None - ) - - if len(required) != 0: - parser = cls.get_parser() - parser.print_help(file=sys.stderr) - p = Path(sys.argv[0]).name - print(file=sys.stderr) - print( - f"{p}: error: the following arguments are required: " f"{required}", - file=sys.stderr, - ) - sys.exit(2) - - @classmethod - def check_task_requirements( - cls, - dataset: Union[AbsDataset, IterableESPnetDataset], - allow_variable_data_keys: bool, - train: bool, - inference: bool = False, - ) -> None: - """Check if the dataset satisfy the requirement of current Task""" - assert check_argument_types() - mes = ( - f"If you intend to use an additional input, modify " - f'"{cls.__name__}.required_data_names()" or ' - f'"{cls.__name__}.optional_data_names()". ' - f"Otherwise you need to set --allow_variable_data_keys true " - ) - - for k in cls.required_data_names(train, inference): - if not dataset.has_name(k): - raise RuntimeError( - f'"{cls.required_data_names(train, inference)}" are required for' - f' {cls.__name__}. but "{dataset.names()}" are input.\n{mes}' - ) - if not allow_variable_data_keys: - task_keys = cls.required_data_names( - train, inference - ) + cls.optional_data_names(train, inference) - for k in dataset.names(): - if k not in task_keys: - raise RuntimeError( - f"The data-name must be one of {task_keys} " - f'for {cls.__name__}: "{k}" is not allowed.\n{mes}' - ) - - @classmethod - def print_config(cls, file=sys.stdout) -> None: - assert check_argument_types() - # Shows the config: e.g. 
python train.py asr --print_config - config = cls.get_default_config() - file.write(yaml_no_alias_safe_dump(config, indent=4, sort_keys=False)) - - @classmethod - def main(cls, args: argparse.Namespace = None, cmd: Sequence[str] = None): - assert check_argument_types() - print(get_commandline_args(), file=sys.stderr) - if args is None: - parser = cls.get_parser() - args = parser.parse_args(cmd) - args.version = __version__ - if args.pretrain_path is not None: - raise RuntimeError("--pretrain_path is deprecated. Use --init_param") - if args.print_config: - cls.print_config() - sys.exit(0) - cls.check_required_command_args(args) - - # "distributed" is decided using the other command args - resolve_distributed_mode(args) - if not args.distributed or not args.multiprocessing_distributed: - cls.main_worker(args) - - else: - assert args.ngpu > 1, args.ngpu - # Multi-processing distributed mode: e.g. 2node-4process-4GPU - # | Host1 | Host2 | - # | Process1 | Process2 | <= Spawn processes - # |Child1|Child2|Child1|Child2| - # |GPU1 |GPU2 |GPU1 |GPU2 | - - # See also the following usage of --multiprocessing-distributed: - # https://github.com/pytorch/examples/blob/master/imagenet/main.py - num_nodes = get_num_nodes(args.dist_world_size, args.dist_launcher) - if num_nodes == 1: - args.dist_master_addr = "localhost" - args.dist_rank = 0 - # Single node distributed training with multi-GPUs - if ( - args.dist_init_method == "env://" - and get_master_port(args.dist_master_port) is None - ): - # Get the unused port - args.dist_master_port = free_port() - - # Assume that nodes use same number of GPUs each other - args.dist_world_size = args.ngpu * num_nodes - node_rank = get_node_rank(args.dist_rank, args.dist_launcher) - - # The following block is copied from: - # https://github.com/pytorch/pytorch/blob/master/torch/multiprocessing/spawn.py - error_queues = [] - processes = [] - mp = torch.multiprocessing.get_context("spawn") - for i in range(args.ngpu): - # Copy args - local_args = argparse.Namespace(**vars(args)) - - local_args.local_rank = i - local_args.dist_rank = args.ngpu * node_rank + i - local_args.ngpu = 1 - - process = mp.Process( - target=cls.main_worker, - args=(local_args,), - daemon=False, - ) - process.start() - processes.append(process) - error_queues.append(mp.SimpleQueue()) - # Loop on join until it returns True or raises an exception. - while not ProcessContext(processes, error_queues).join(): - pass - - @classmethod - def main_worker(cls, args: argparse.Namespace): - assert check_argument_types() - - # 0. Init distributed process - distributed_option = build_dataclass(DistributedOption, args) - # Setting distributed_option.dist_rank, etc. - distributed_option.init_options() - - # NOTE(kamo): Don't use logging before invoking logging.basicConfig() - if not distributed_option.distributed or distributed_option.dist_rank == 0: - if not distributed_option.distributed: - _rank = "" - else: - _rank = ( - f":{distributed_option.dist_rank}/" - f"{distributed_option.dist_world_size}" - ) - - # NOTE(kamo): - # logging.basicConfig() is invoked in main_worker() instead of main() - # because it can be invoked only once in a process. - # FIXME(kamo): Should we use logging.getLogger()? 
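# NOTE: switching to logging.getLogger() alone would not help here: getLogger() only returns a logger object, while basicConfig() is what attaches the stream handler and format to the root logger (and it silently does nothing once the root logger already has handlers, hence the once-per-process call below).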
- logging.basicConfig( - level=args.log_level, - format=f"[{os.uname()[1].split('.')[0]}{_rank}]" - f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", - ) - else: - # Suppress logging if RANK != 0 - logging.basicConfig( - level="ERROR", - format=f"[{os.uname()[1].split('.')[0]}" - f":{distributed_option.dist_rank}/{distributed_option.dist_world_size}]" - f" %(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", - ) - # Invoking torch.distributed.init_process_group - distributed_option.init_torch_distributed() - - # 1. Set random-seed - set_all_random_seed(args.seed) - torch.backends.cudnn.enabled = args.cudnn_enabled - torch.backends.cudnn.benchmark = args.cudnn_benchmark - torch.backends.cudnn.deterministic = args.cudnn_deterministic - if args.detect_anomaly: - logging.info("Invoking torch.autograd.set_detect_anomaly(True)") - torch.autograd.set_detect_anomaly(args.detect_anomaly) - - # 2. Build model - model = cls.build_model(args=args) - if not isinstance(model, AbsESPnetModel): - raise RuntimeError( - f"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}" - ) - model = model.to( - dtype=getattr(torch, args.train_dtype), - device="cuda" if args.ngpu > 0 else "cpu", - ) - for t in args.freeze_param: - for k, p in model.named_parameters(): - if k.startswith(t + ".") or k == t: - logging.info(f"Setting {k}.requires_grad = False") - p.requires_grad = False - - # 3. Build optimizer - optimizers = cls.build_optimizers(args, model=model) - - # 4. Build schedulers - schedulers = [] - for i, optim in enumerate(optimizers, 1): - suf = "" if i == 1 else str(i) - name = getattr(args, f"scheduler{suf}") - conf = getattr(args, f"scheduler{suf}_conf") - if name is not None: - cls_ = scheduler_classes.get(name) - if cls_ is None: - raise ValueError( - f"must be one of {list(scheduler_classes)}: {name}" - ) - scheduler = cls_(optim, **conf) - else: - scheduler = None - - schedulers.append(scheduler) - - logging.info(pytorch_cudnn_version()) - logging.info(model_summary(model)) - for i, (o, s) in enumerate(zip(optimizers, schedulers), 1): - suf = "" if i == 1 else str(i) - logging.info(f"Optimizer{suf}:\n{o}") - logging.info(f"Scheduler{suf}: {s}") - - # 5. Dump "args" to config.yaml - # NOTE(kamo): "args" should be saved after object-buildings are done - # because they are allowed to modify "args". - output_dir = Path(args.output_dir) - if not distributed_option.distributed or distributed_option.dist_rank == 0: - output_dir.mkdir(parents=True, exist_ok=True) - with (output_dir / "config.yaml").open("w", encoding="utf-8") as f: - logging.info( - f'Saving the configuration in {output_dir / "config.yaml"}' - ) - yaml_no_alias_safe_dump(vars(args), f, indent=4, sort_keys=False) - - # 6. Loads pre-trained model - for p in args.init_param: - logging.info(f"Loading pretrained params from {p}") - load_pretrained_model( - model=model, - init_param=p, - # NOTE(kamo): "cuda" for torch.load always indicates cuda:0 - # in PyTorch<=1.4 - map_location=f"cuda:{torch.cuda.current_device()}" - if args.ngpu > 0 - else "cpu", - ) - - if args.dry_run: - pass - elif args.collect_stats: - # Perform on collect_stats mode. 
This mode has two roles - # - Derive the length and dimension of all input data - # - Accumulate feats, square values, and the length for whitening - logging.info(args) - - if args.valid_batch_size is None: - args.valid_batch_size = args.batch_size - - if len(args.train_shape_file) != 0: - train_key_file = args.train_shape_file[0] - else: - train_key_file = None - if len(args.valid_shape_file) != 0: - valid_key_file = args.valid_shape_file[0] - else: - valid_key_file = None - - collect_stats( - model=model, - train_iter=cls.build_streaming_iterator( - data_path_and_name_and_type=args.train_data_path_and_name_and_type, - key_file=train_key_file, - batch_size=args.batch_size, - dtype=args.train_dtype, - num_workers=args.num_workers, - allow_variable_data_keys=args.allow_variable_data_keys, - ngpu=args.ngpu, - preprocess_fn=cls.build_preprocess_fn(args, train=False), - collate_fn=cls.build_collate_fn(args, train=False), - ), - valid_iter=cls.build_streaming_iterator( - data_path_and_name_and_type=args.valid_data_path_and_name_and_type, - key_file=valid_key_file, - batch_size=args.valid_batch_size, - dtype=args.train_dtype, - num_workers=args.num_workers, - allow_variable_data_keys=args.allow_variable_data_keys, - ngpu=args.ngpu, - preprocess_fn=cls.build_preprocess_fn(args, train=False), - collate_fn=cls.build_collate_fn(args, train=False), - ), - output_dir=output_dir, - ngpu=args.ngpu, - log_interval=args.log_interval, - write_collected_feats=args.write_collected_feats, - ) - else: - - # 7. Build iterator factories - if args.multiple_iterator: - train_iter_factory = cls.build_multiple_iter_factory( - args=args, - distributed_option=distributed_option, - mode="train", - ) - else: - train_iter_factory = cls.build_iter_factory( - args=args, - distributed_option=distributed_option, - mode="train", - ) - valid_iter_factory = cls.build_iter_factory( - args=args, - distributed_option=distributed_option, - mode="valid", - ) - if args.num_att_plot != 0: - plot_attention_iter_factory = cls.build_iter_factory( - args=args, - distributed_option=distributed_option, - mode="plot_att", - ) - else: - plot_attention_iter_factory = None - - # 8. Start training - if args.use_wandb: - if ( - not distributed_option.distributed - or distributed_option.dist_rank == 0 - ): - if args.wandb_project is None: - project = ( - "ESPnet_" - + cls.__name__ - + str(Path(".").resolve()).replace("/", "_") - ) - else: - project = args.wandb_project - if args.wandb_id is None: - wandb_id = str(output_dir).replace("/", "_") - else: - wandb_id = args.wandb_id - - wandb.init( - project=project, - dir=output_dir, - id=wandb_id, - resume="allow", - ) - wandb.config.update(args) - else: - # wandb also supports grouping for distributed training, - # but we only logs aggregated data, - # so it's enough to perform on rank0 node. - args.use_wandb = False - - # Don't give args to trainer.run() directly!!! - # Instead of it, define "Options" object and build here. 
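# NOTE: build_options() is expected to pick only the trainer-related fields out of args (e.g. max_epoch, patience, grad_clip, accum_grad, resume defined above) and pack them into a small options object, so the Trainer never depends on the full CLI namespace; the exact field set is decided by cls.trainer.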
- trainer_options = cls.trainer.build_options(args) - cls.trainer.run( - model=model, - optimizers=optimizers, - schedulers=schedulers, - train_iter_factory=train_iter_factory, - valid_iter_factory=valid_iter_factory, - plot_attention_iter_factory=plot_attention_iter_factory, - trainer_options=trainer_options, - distributed_option=distributed_option, - ) - - @classmethod - def build_iter_options( - cls, - args: argparse.Namespace, - distributed_option: DistributedOption, - mode: str, - ): - if mode == "train": - preprocess_fn = cls.build_preprocess_fn(args, train=True) - collate_fn = cls.build_collate_fn(args, train=True) - data_path_and_name_and_type = args.train_data_path_and_name_and_type - shape_files = args.train_shape_file - batch_size = args.batch_size - batch_bins = args.batch_bins - batch_type = args.batch_type - max_cache_size = args.max_cache_size - max_cache_fd = args.max_cache_fd - distributed = distributed_option.distributed - num_batches = None - num_iters_per_epoch = args.num_iters_per_epoch - train = True - - elif mode == "valid": - preprocess_fn = cls.build_preprocess_fn(args, train=False) - collate_fn = cls.build_collate_fn(args, train=False) - data_path_and_name_and_type = args.valid_data_path_and_name_and_type - shape_files = args.valid_shape_file - - if args.valid_batch_type is None: - batch_type = args.batch_type - else: - batch_type = args.valid_batch_type - if args.valid_batch_size is None: - batch_size = args.batch_size - else: - batch_size = args.valid_batch_size - if args.valid_batch_bins is None: - batch_bins = args.batch_bins - else: - batch_bins = args.valid_batch_bins - if args.valid_max_cache_size is None: - # Cache 5% of maximum size for validation loader - max_cache_size = 0.05 * args.max_cache_size - else: - max_cache_size = args.valid_max_cache_size - max_cache_fd = args.max_cache_fd - distributed = distributed_option.distributed - num_batches = None - num_iters_per_epoch = None - train = False - - elif mode == "plot_att": - preprocess_fn = cls.build_preprocess_fn(args, train=False) - collate_fn = cls.build_collate_fn(args, train=False) - data_path_and_name_and_type = args.valid_data_path_and_name_and_type - shape_files = args.valid_shape_file - batch_type = "unsorted" - batch_size = 1 - batch_bins = 0 - num_batches = args.num_att_plot - max_cache_fd = args.max_cache_fd - # num_att_plot should be a few sample ~ 3, so cache all data. - max_cache_size = np.inf if args.max_cache_size != 0.0 else 0.0 - # always False because plot_attention performs on RANK0 - distributed = False - num_iters_per_epoch = None - train = False - else: - raise NotImplementedError(f"mode={mode}") - - return IteratorOptions( - preprocess_fn=preprocess_fn, - collate_fn=collate_fn, - data_path_and_name_and_type=data_path_and_name_and_type, - shape_files=shape_files, - batch_type=batch_type, - batch_size=batch_size, - batch_bins=batch_bins, - num_batches=num_batches, - max_cache_size=max_cache_size, - max_cache_fd=max_cache_fd, - distributed=distributed, - num_iters_per_epoch=num_iters_per_epoch, - train=train, - ) - - @classmethod - def build_iter_factory( - cls, - args: argparse.Namespace, - distributed_option: DistributedOption, - mode: str, - kwargs: dict = None, - ) -> AbsIterFactory: - """Build a factory object of mini-batch iterator. - - This object is invoked at every epochs to build the iterator for each epoch - as following: - - >>> iter_factory = cls.build_iter_factory(...) - >>> for epoch in range(1, max_epoch): - ... 
for keys, batch in iter_factory.build_iter(epoch): - ... model(**batch) - - The mini-batches for each epoch are fully controlled by this class. - Note that the random seed used for shuffling is decided as "seed + epoch" and - the generated mini-batches can be reproduced when resuming. - - Note that the definition of "epoch" doesn't always mean - running through the whole training corpus. - The "--num_iters_per_epoch" option restricts the number of iterations for each epoch - and the rest of the samples of the original epoch are left for the next epoch. - e.g. if the number of mini-batches equals 4, the following two are the same: - - - 1 epoch without "--num_iters_per_epoch" - - 4 epochs with "--num_iters_per_epoch" == 1 - - """ - assert check_argument_types() - iter_options = cls.build_iter_options(args, distributed_option, mode) - - # Overwrite iter_options if any kwargs are given - if kwargs is not None: - for k, v in kwargs.items(): - setattr(iter_options, k, v) - - if args.iterator_type == "sequence": - return cls.build_sequence_iter_factory( - args=args, - iter_options=iter_options, - mode=mode, - ) - elif args.iterator_type == "chunk": - return cls.build_chunk_iter_factory( - args=args, - iter_options=iter_options, - mode=mode, - ) - elif args.iterator_type == "task": - return cls.build_task_iter_factory( - args=args, - iter_options=iter_options, - mode=mode, - ) - else: - raise RuntimeError(f"Not supported: iterator_type={args.iterator_type}") - - @classmethod - def build_sequence_iter_factory( - cls, args: argparse.Namespace, iter_options: IteratorOptions, mode: str - ) -> AbsIterFactory: - assert check_argument_types() - - dataset = ESPnetDataset( - iter_options.data_path_and_name_and_type, - float_dtype=args.train_dtype, - preprocess=iter_options.preprocess_fn, - max_cache_size=iter_options.max_cache_size, - max_cache_fd=iter_options.max_cache_fd, - ) - cls.check_task_requirements( - dataset, args.allow_variable_data_keys, train=iter_options.train - ) - - if Path( - Path(iter_options.data_path_and_name_and_type[0][0]).parent, "utt2category" - ).exists(): - utt2category_file = str( - Path( - Path(iter_options.data_path_and_name_and_type[0][0]).parent, - "utt2category", - ) - ) - else: - utt2category_file = None - batch_sampler = build_batch_sampler( - type=iter_options.batch_type, - shape_files=iter_options.shape_files, - fold_lengths=args.fold_length, - batch_size=iter_options.batch_size, - batch_bins=iter_options.batch_bins, - sort_in_batch=args.sort_in_batch, - sort_batch=args.sort_batch, - drop_last=False, - min_batch_size=torch.distributed.get_world_size() - if iter_options.distributed - else 1, - utt2category_file=utt2category_file, - ) - - batches = list(batch_sampler) - if iter_options.num_batches is not None: - batches = batches[: iter_options.num_batches] - - bs_list = [len(batch) for batch in batches] - - logging.info(f"[{mode}] dataset:\n{dataset}") - logging.info(f"[{mode}] Batch sampler: {batch_sampler}") - logging.info( - f"[{mode}] mini-batch sizes summary: N-batch={len(bs_list)}, " - f"mean={np.mean(bs_list):.1f}, min={np.min(bs_list)}, max={np.max(bs_list)}" - ) - - if iter_options.distributed: - world_size = torch.distributed.get_world_size() - rank = torch.distributed.get_rank() - for batch in batches: - if len(batch) < world_size: - raise RuntimeError( - f"The batch-size must be equal to or greater than world_size: " - f"{len(batch)} < {world_size}" - ) - batches = [batch[rank::world_size] for batch in batches] - - return SequenceIterFactory( - dataset=dataset, - 
batches=batches, - seed=args.seed, - num_iters_per_epoch=iter_options.num_iters_per_epoch, - shuffle=iter_options.train, - num_workers=args.num_workers, - collate_fn=iter_options.collate_fn, - pin_memory=args.ngpu > 0, - ) - - @classmethod - def build_chunk_iter_factory( - cls, - args: argparse.Namespace, - iter_options: IteratorOptions, - mode: str, - ) -> AbsIterFactory: - assert check_argument_types() - - dataset = ESPnetDataset( - iter_options.data_path_and_name_and_type, - float_dtype=args.train_dtype, - preprocess=iter_options.preprocess_fn, - max_cache_size=iter_options.max_cache_size, - max_cache_fd=iter_options.max_cache_fd, - ) - cls.check_task_requirements( - dataset, args.allow_variable_data_keys, train=iter_options.train - ) - - if len(iter_options.shape_files) == 0: - key_file = iter_options.data_path_and_name_and_type[0][0] - else: - key_file = iter_options.shape_files[0] - - batch_sampler = UnsortedBatchSampler(batch_size=1, key_file=key_file) - batches = list(batch_sampler) - if iter_options.num_batches is not None: - batches = batches[: iter_options.num_batches] - logging.info(f"[{mode}] dataset:\n{dataset}") - - if iter_options.distributed: - world_size = torch.distributed.get_world_size() - rank = torch.distributed.get_rank() - if len(batches) < world_size: - raise RuntimeError("Number of samples is smaller than world_size") - if iter_options.batch_size < world_size: - raise RuntimeError("batch_size must be equal or more than world_size") - - if rank < iter_options.batch_size % world_size: - batch_size = iter_options.batch_size // world_size + 1 - else: - batch_size = iter_options.batch_size // world_size - num_cache_chunks = args.num_cache_chunks // world_size - # NOTE(kamo): Split whole corpus by sample numbers without considering - # each of the lengths, therefore the number of iteration counts are not - # always equal to each other and the iterations are limitted - # by the fewest iterations. - # i.e. the samples over the counts are discarded. - batches = batches[rank::world_size] - else: - batch_size = iter_options.batch_size - num_cache_chunks = args.num_cache_chunks - - return ChunkIterFactory( - dataset=dataset, - batches=batches, - seed=args.seed, - batch_size=batch_size, - # For chunk iterator, - # --num_iters_per_epoch doesn't indicate the number of iterations, - # but indicates the number of samples. - num_samples_per_epoch=iter_options.num_iters_per_epoch, - shuffle=iter_options.train, - num_workers=args.num_workers, - collate_fn=iter_options.collate_fn, - pin_memory=args.ngpu > 0, - chunk_length=args.chunk_length, - chunk_shift_ratio=args.chunk_shift_ratio, - num_cache_chunks=num_cache_chunks, - ) - - # NOTE(kamo): Not abstract class - @classmethod - def build_task_iter_factory( - cls, - args: argparse.Namespace, - iter_options: IteratorOptions, - mode: str, - ) -> AbsIterFactory: - """Build task specific iterator factory - - Example: - - >>> class YourTask(AbsTask): - ... @classmethod - ... def add_task_arguments(cls, parser: argparse.ArgumentParser): - ... parser.set_defaults(iterator_type="task") - ... - ... @classmethod - ... def build_task_iter_factory( - ... cls, - ... args: argparse.Namespace, - ... iter_options: IteratorOptions, - ... mode: str, - ... ): - ... return FooIterFactory(...) - ... - ... @classmethod - ... def build_iter_options( - .... args: argparse.Namespace, - ... distributed_option: DistributedOption, - ... mode: str - ... ): - ... 
# if you need to customize options object - """ - raise NotImplementedError - - @classmethod - def build_multiple_iter_factory( - cls, args: argparse.Namespace, distributed_option: DistributedOption, mode: str - ): - assert check_argument_types() - iter_options = cls.build_iter_options(args, distributed_option, mode) - assert len(iter_options.data_path_and_name_and_type) > 0, len( - iter_options.data_path_and_name_and_type - ) - - # 1. Sanity check - num_splits = None - for path in [ - path for path, _, _ in iter_options.data_path_and_name_and_type - ] + list(iter_options.shape_files): - if not Path(path).is_dir(): - raise RuntimeError(f"{path} is not a directory") - p = Path(path) / "num_splits" - if not p.exists(): - raise FileNotFoundError(f"{p} is not found") - with p.open() as f: - _num_splits = int(f.read()) - if num_splits is not None and num_splits != _num_splits: - raise RuntimeError( - f"Number of splits are mismathed: " - f"{iter_options.data_path_and_name_and_type[0][0]} and {path}" - ) - num_splits = _num_splits - - for i in range(num_splits): - p = Path(path) / f"split.{i}" - if not p.exists(): - raise FileNotFoundError(f"{p} is not found") - - # 2. Create functions to build an iter factory for each splits - data_path_and_name_and_type_list = [ - [ - (str(Path(p) / f"split.{i}"), n, t) - for p, n, t in iter_options.data_path_and_name_and_type - ] - for i in range(num_splits) - ] - shape_files_list = [ - [str(Path(s) / f"split.{i}") for s in iter_options.shape_files] - for i in range(num_splits) - ] - num_iters_per_epoch_list = [ - (iter_options.num_iters_per_epoch + i) // num_splits - if iter_options.num_iters_per_epoch is not None - else None - for i in range(num_splits) - ] - max_cache_size = iter_options.max_cache_size / num_splits - - # Note that iter-factories are built for each epoch at runtime lazily. - build_funcs = [ - functools.partial( - cls.build_iter_factory, - args, - distributed_option, - mode, - kwargs=dict( - data_path_and_name_and_type=_data_path_and_name_and_type, - shape_files=_shape_files, - num_iters_per_epoch=_num_iters_per_epoch, - max_cache_size=max_cache_size, - ), - ) - for ( - _data_path_and_name_and_type, - _shape_files, - _num_iters_per_epoch, - ) in zip( - data_path_and_name_and_type_list, - shape_files_list, - num_iters_per_epoch_list, - ) - ] - - # 3. 
Build MultipleIterFactory - return MultipleIterFactory( - build_funcs=build_funcs, shuffle=iter_options.train, seed=args.seed - ) - - @classmethod - def build_streaming_iterator( - cls, - data_path_and_name_and_type, - preprocess_fn, - collate_fn, - key_file: str = None, - batch_size: int = 1, - dtype: str = np.float32, - num_workers: int = 1, - allow_variable_data_keys: bool = False, - ngpu: int = 0, - inference: bool = False, - ) -> DataLoader: - """Build DataLoader using iterable dataset""" - assert check_argument_types() - # For backward compatibility for pytorch DataLoader - if collate_fn is not None: - kwargs = dict(collate_fn=collate_fn) - else: - kwargs = {} - - # IterableDataset is supported from pytorch=1.2 - if LooseVersion(torch.__version__) >= LooseVersion("1.2"): - dataset = IterableESPnetDataset( - data_path_and_name_and_type, - float_dtype=dtype, - preprocess=preprocess_fn, - key_file=key_file, - ) - if dataset.apply_utt2category: - kwargs.update(batch_size=1) - else: - kwargs.update(batch_size=batch_size) - else: - dataset = ESPnetDataset( - data_path_and_name_and_type, - float_dtype=dtype, - preprocess=preprocess_fn, - ) - if key_file is None: - key_file = data_path_and_name_and_type[0][0] - batch_sampler = UnsortedBatchSampler( - batch_size=batch_size, - key_file=key_file, - drop_last=False, - ) - kwargs.update(batch_sampler=batch_sampler) - - cls.check_task_requirements( - dataset, allow_variable_data_keys, train=False, inference=inference - ) - - return DataLoader( - dataset=dataset, - pin_memory=ngpu > 0, - num_workers=num_workers, - **kwargs, - ) - - # ~~~~~~~~~ The methods below are mainly used for inference ~~~~~~~~~ - @classmethod - def build_model_from_file( - cls, - config_file: Union[Path, str], - model_file: Union[Path, str] = None, - device: str = "cpu", - ) -> Tuple[AbsESPnetModel, argparse.Namespace]: - """This method is used for inference or fine-tuning. - - Args: - config_file: The yaml file saved when training. - model_file: The model file saved when training. - device: - - """ - assert check_argument_types() - config_file = Path(config_file) - - with config_file.open("r", encoding="utf-8") as f: - args = yaml.safe_load(f) - args = argparse.Namespace(**args) - model = cls.build_model(args) - if not isinstance(model, AbsESPnetModel): - raise RuntimeError( - f"model must inherit {AbsESPnetModel.__name__}, but got {type(model)}" - ) - model.to(device) - if model_file is not None: - if device == "cuda": - # NOTE(kamo): "cuda" for torch.load always indicates cuda:0 - # in PyTorch<=1.4 - device = f"cuda:{torch.cuda.current_device()}" - model.load_state_dict(torch.load(model_file, map_location=device), strict=False) # TC Marker - - return model, args diff --git a/spaces/senquan/ChuanhuChatGPT/presets.py b/spaces/senquan/ChuanhuChatGPT/presets.py deleted file mode 100644 index 2a518eabbc48400cd76a45163d6910abf57532a0..0000000000000000000000000000000000000000 --- a/spaces/senquan/ChuanhuChatGPT/presets.py +++ /dev/null @@ -1,87 +0,0 @@ -# -*- coding:utf-8 -*- - -# ChatGPT 设置 -initial_prompt = "You are a helpful assistant." 
-API_URL = "https://api.openai.com/v1/chat/completions" -HISTORY_DIR = "history" -TEMPLATES_DIR = "templates" - -# 错误信息 -standard_error_msg = "☹️发生了错误:" # 错误信息的标准前缀 -error_retrieve_prompt = "请检查网络连接,或者API-Key是否有效。" # 获取对话时发生错误 -connection_timeout_prompt = "连接超时,无法获取对话。" # 连接超时 -read_timeout_prompt = "读取超时,无法获取对话。" # 读取超时 -proxy_error_prompt = "代理错误,无法获取对话。" # 代理错误 -ssl_error_prompt = "SSL错误,无法获取对话。" # SSL 错误 -no_apikey_msg = "API key长度不是51位,请检查是否输入正确。" # API key 长度不足 51 位 - -max_token_streaming = 3500 # 流式对话时的最大 token 数 -timeout_streaming = 5 # 流式对话时的超时时间 -max_token_all = 3500 # 非流式对话时的最大 token 数 -timeout_all = 200 # 非流式对话时的超时时间 -enable_streaming_option = True # 是否启用选择选择是否实时显示回答的勾选框 -HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True - -SIM_K = 5 -INDEX_QUERY_TEMPRATURE = 1.0 - -title = """
        川虎ChatGPT 🚀
        """ -description = """\ -
        - -由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发 - -访问川虎ChatGPT的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本 - -此App使用 `gpt-3.5-turbo` 大语言模型 -
        -""" - -summarize_prompt = "你是谁?我们刚才聊了什么?" # 总结对话时的 prompt - -MODELS = [ - "gpt-3.5-turbo", - "gpt-3.5-turbo-0301", - "gpt-4", - "gpt-4-0314", - "gpt-4-32k", - "gpt-4-32k-0314", -] # 可选的模型 - - -WEBSEARCH_PTOMPT_TEMPLATE = """\ -Web search results: - -{web_results} -Current date: {current_date} - -Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. -Query: {query} -Reply in 中文""" - -PROMPT_TEMPLATE = """\ -Context information is below. ---------------------- -{context_str} ---------------------- -Current date: {current_date}. -Using the provided context information, write a comprehensive reply to the given query. -Make sure to cite results using [number] notation after the reference. -If the provided context information refer to multiple subjects with the same name, write separate answers for each subject. -Use prior knowledge only if the given context didn't provide enough information. -Answer the question: {query_str} -Reply in 中文 -""" - -REFINE_TEMPLATE = """\ -The original question is as follows: {query_str} -We have provided an existing answer: {existing_answer} -We have the opportunity to refine the existing answer -(only if needed) with some more context below. ------------- -{context_msg} ------------- -Given the new context, refine the original answer to better -Answer in the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch. -If the context isn't useful, return the original answer. -""" diff --git a/spaces/shelby/scan_rotation_app/README.md b/spaces/shelby/scan_rotation_app/README.md deleted file mode 100644 index f95a0bdf5476d52b72e1cbca113ed0317d7ed678..0000000000000000000000000000000000000000 --- a/spaces/shelby/scan_rotation_app/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Scan_rotation_app -emoji: 🚀 -colorFrom: yellow -colorTo: red -sdk: gradio -app_file: app.py -pinned: false -license: gpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/shencc/gpt/request_llm/edge_gpt.py b/spaces/shencc/gpt/request_llm/edge_gpt.py deleted file mode 100644 index bbf84000d84a42de80d3c051a24f06336af76aaf..0000000000000000000000000000000000000000 --- a/spaces/shencc/gpt/request_llm/edge_gpt.py +++ /dev/null @@ -1,409 +0,0 @@ -""" -======================================================================== -第一部分:来自EdgeGPT.py -https://github.com/acheong08/EdgeGPT -======================================================================== -""" - -import argparse -import asyncio -import json -import os -import random -import re -import ssl -import sys -import uuid -from enum import Enum -from typing import Generator -from typing import Literal -from typing import Optional -from typing import Union -import websockets.client as websockets - -DELIMITER = "\x1e" - - -# Generate random IP between range 13.104.0.0/14 -FORWARDED_IP = ( - f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}" -) - -HEADERS = { - "accept": "application/json", - "accept-language": "en-US,en;q=0.9", - "content-type": "application/json", - "sec-ch-ua": '"Not_A Brand";v="99", "Microsoft Edge";v="110", "Chromium";v="110"', - "sec-ch-ua-arch": '"x86"', - "sec-ch-ua-bitness": '"64"', - "sec-ch-ua-full-version": '"109.0.1518.78"', - 
"sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-model": "", - "sec-ch-ua-platform": '"Windows"', - "sec-ch-ua-platform-version": '"15.0.0"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "x-ms-client-request-id": str(uuid.uuid4()), - "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32", - "Referer": "https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx", - "Referrer-Policy": "origin-when-cross-origin", - "x-forwarded-for": FORWARDED_IP, -} - -HEADERS_INIT_CONVER = { - "authority": "edgeservices.bing.com", - "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7", - "accept-language": "en-US,en;q=0.9", - "cache-control": "max-age=0", - "sec-ch-ua": '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"', - "sec-ch-ua-arch": '"x86"', - "sec-ch-ua-bitness": '"64"', - "sec-ch-ua-full-version": '"110.0.1587.69"', - "sec-ch-ua-full-version-list": '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-model": '""', - "sec-ch-ua-platform": '"Windows"', - "sec-ch-ua-platform-version": '"15.0.0"', - "sec-fetch-dest": "document", - "sec-fetch-mode": "navigate", - "sec-fetch-site": "none", - "sec-fetch-user": "?1", - "upgrade-insecure-requests": "1", - "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69", - "x-edge-shopping-flag": "1", - "x-forwarded-for": FORWARDED_IP, -} - -def get_ssl_context(): - import certifi - ssl_context = ssl.create_default_context() - ssl_context.load_verify_locations(certifi.where()) - return ssl_context - - - -class NotAllowedToAccess(Exception): - pass - - -class ConversationStyle(Enum): - creative = "h3imaginative,clgalileo,gencontentv3" - balanced = "galileo" - precise = "h3precise,clgalileo" - - -CONVERSATION_STYLE_TYPE = Optional[ - Union[ConversationStyle, Literal["creative", "balanced", "precise"]] -] - - -def _append_identifier(msg: dict) -> str: - """ - Appends special character to end of message to identify end of message - """ - # Convert dict to json string - return json.dumps(msg) + DELIMITER - - -def _get_ran_hex(length: int = 32) -> str: - """ - Returns random hex string - """ - return "".join(random.choice("0123456789abcdef") for _ in range(length)) - - -class _ChatHubRequest: - """ - Request object for ChatHub - """ - - def __init__( - self, - conversation_signature: str, - client_id: str, - conversation_id: str, - invocation_id: int = 0, - ) -> None: - self.struct: dict = {} - - self.client_id: str = client_id - self.conversation_id: str = conversation_id - self.conversation_signature: str = conversation_signature - self.invocation_id: int = invocation_id - - def update( - self, - prompt, - conversation_style, - options, - ) -> None: - """ - Updates request object - """ - if options is None: - options = [ - "deepleo", - "enable_debug_commands", - "disable_emoji_spoken_text", - "enablemm", - ] - if conversation_style: - if not isinstance(conversation_style, ConversationStyle): - conversation_style = getattr(ConversationStyle, conversation_style) - options = [ - "nlu_direct_response_filter", - "deepleo", - "disable_emoji_spoken_text", - "responsible_ai_policy_235", - 
"enablemm", - conversation_style.value, - "dtappid", - "cricinfo", - "cricinfov2", - "dv3sugg", - ] - self.struct = { - "arguments": [ - { - "source": "cib", - "optionsSets": options, - "sliceIds": [ - "222dtappid", - "225cricinfo", - "224locals0", - ], - "traceId": _get_ran_hex(32), - "isStartOfSession": self.invocation_id == 0, - "message": { - "author": "user", - "inputMethod": "Keyboard", - "text": prompt, - "messageType": "Chat", - }, - "conversationSignature": self.conversation_signature, - "participant": { - "id": self.client_id, - }, - "conversationId": self.conversation_id, - }, - ], - "invocationId": str(self.invocation_id), - "target": "chat", - "type": 4, - } - self.invocation_id += 1 - - -class _Conversation: - """ - Conversation API - """ - - def __init__( - self, - cookies, - proxy, - ) -> None: - self.struct: dict = { - "conversationId": None, - "clientId": None, - "conversationSignature": None, - "result": {"value": "Success", "message": None}, - } - import httpx - self.proxy = proxy - proxy = ( - proxy - or os.environ.get("all_proxy") - or os.environ.get("ALL_PROXY") - or os.environ.get("https_proxy") - or os.environ.get("HTTPS_PROXY") - or None - ) - if proxy is not None and proxy.startswith("socks5h://"): - proxy = "socks5://" + proxy[len("socks5h://") :] - self.session = httpx.Client( - proxies=proxy, - timeout=30, - headers=HEADERS_INIT_CONVER, - ) - for cookie in cookies: - self.session.cookies.set(cookie["name"], cookie["value"]) - - # Send GET request - response = self.session.get( - url=os.environ.get("BING_PROXY_URL") - or "https://edgeservices.bing.com/edgesvc/turing/conversation/create", - ) - if response.status_code != 200: - response = self.session.get( - "https://edge.churchless.tech/edgesvc/turing/conversation/create", - ) - if response.status_code != 200: - print(f"Status code: {response.status_code}") - print(response.text) - print(response.url) - raise Exception("Authentication failed") - try: - self.struct = response.json() - except (json.decoder.JSONDecodeError, NotAllowedToAccess) as exc: - raise Exception( - "Authentication failed. 
You have not been accepted into the beta.", - ) from exc - if self.struct["result"]["value"] == "UnauthorizedRequest": - raise NotAllowedToAccess(self.struct["result"]["message"]) - - -class _ChatHub: - """ - Chat API - """ - - def __init__(self, conversation) -> None: - self.wss = None - self.request: _ChatHubRequest - self.loop: bool - self.task: asyncio.Task - print(conversation.struct) - self.request = _ChatHubRequest( - conversation_signature=conversation.struct["conversationSignature"], - client_id=conversation.struct["clientId"], - conversation_id=conversation.struct["conversationId"], - ) - - async def ask_stream( - self, - prompt: str, - wss_link: str, - conversation_style: CONVERSATION_STYLE_TYPE = None, - raw: bool = False, - options: dict = None, - ) -> Generator[str, None, None]: - """ - Ask a question to the bot - """ - if self.wss and not self.wss.closed: - await self.wss.close() - # Check if websocket is closed - self.wss = await websockets.connect( - wss_link, - extra_headers=HEADERS, - max_size=None, - ssl=get_ssl_context() - ) - await self._initial_handshake() - # Construct a ChatHub request - self.request.update( - prompt=prompt, - conversation_style=conversation_style, - options=options, - ) - # Send request - await self.wss.send(_append_identifier(self.request.struct)) - final = False - while not final: - objects = str(await self.wss.recv()).split(DELIMITER) - for obj in objects: - if obj is None or not obj: - continue - response = json.loads(obj) - if response.get("type") != 2 and raw: - yield False, response - elif response.get("type") == 1 and response["arguments"][0].get( - "messages", - ): - resp_txt = response["arguments"][0]["messages"][0]["adaptiveCards"][ - 0 - ]["body"][0].get("text") - yield False, resp_txt - elif response.get("type") == 2: - final = True - yield True, response - - async def _initial_handshake(self) -> None: - await self.wss.send(_append_identifier({"protocol": "json", "version": 1})) - await self.wss.recv() - - async def close(self) -> None: - """ - Close the connection - """ - if self.wss and not self.wss.closed: - await self.wss.close() - - -class NewbingChatbot: - """ - Combines everything to make it seamless - """ - - def __init__( - self, - cookies, - proxy - ) -> None: - if cookies is None: - cookies = {} - self.cookies = cookies - self.proxy = proxy - self.chat_hub: _ChatHub = _ChatHub( - _Conversation(self.cookies, self.proxy), - ) - - async def ask( - self, - prompt: str, - wss_link: str, - conversation_style: CONVERSATION_STYLE_TYPE = None, - options: dict = None, - ) -> dict: - """ - Ask a question to the bot - """ - async for final, response in self.chat_hub.ask_stream( - prompt=prompt, - conversation_style=conversation_style, - wss_link=wss_link, - options=options, - ): - if final: - return response - await self.chat_hub.wss.close() - return None - - async def ask_stream( - self, - prompt: str, - wss_link: str, - conversation_style: CONVERSATION_STYLE_TYPE = None, - raw: bool = False, - options: dict = None, - ) -> Generator[str, None, None]: - """ - Ask a question to the bot - """ - async for response in self.chat_hub.ask_stream( - prompt=prompt, - conversation_style=conversation_style, - wss_link=wss_link, - raw=raw, - options=options, - ): - yield response - - async def close(self) -> None: - """ - Close the connection - """ - await self.chat_hub.close() - - async def reset(self) -> None: - """ - Reset the conversation - """ - await self.close() - self.chat_hub = _ChatHub(_Conversation(self.cookies, self.proxy)) - - diff 
--git a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/train/mel_processing.py b/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/train/mel_processing.py deleted file mode 100644 index 4ec4d823e5c11623d9015d5814fe316c0a6a6cab..0000000000000000000000000000000000000000 --- a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/train/mel_processing.py +++ /dev/null @@ -1,130 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - return dynamic_range_compression_torch(magnitudes) - - -def spectral_de_normalize_torch(magnitudes): - return dynamic_range_decompression_torch(magnitudes) - - -# Reusable banks -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - """Convert waveform into Linear-frequency Linear-amplitude spectrogram. - - Args: - y :: (B, T) - Audio waveforms - n_fft - sampling_rate - hop_size - win_size - center - Returns: - :: (B, Freq, Frame) - Linear-frequency Linear-amplitude spectrogram - """ - # Validation - if torch.min(y) < -1.0: - print("min value is ", torch.min(y)) - if torch.max(y) > 1.0: - print("max value is ", torch.max(y)) - - # Window - Cache if needed - global hann_window - dtype_device = str(y.dtype) + "_" + str(y.device) - wnsize_dtype_device = str(win_size) + "_" + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to( - dtype=y.dtype, device=y.device - ) - - # Padding - y = torch.nn.functional.pad( - y.unsqueeze(1), - (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), - mode="reflect", - ) - y = y.squeeze(1) - - # Complex Spectrogram :: (B, T) -> (B, Freq, Frame, RealComplex=2) - spec = torch.stft( - y, - n_fft, - hop_length=hop_size, - win_length=win_size, - window=hann_window[wnsize_dtype_device], - center=center, - pad_mode="reflect", - normalized=False, - onesided=True, - return_complex=False, - ) - - # Linear-frequency Linear-amplitude spectrogram :: (B, Freq, Frame, RealComplex=2) -> (B, Freq, Frame) - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - # MelBasis - Cache if needed - global mel_basis - dtype_device = str(spec.dtype) + "_" + str(spec.device) - fmax_dtype_device = str(fmax) + "_" + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn( - sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax - ) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to( - dtype=spec.dtype, device=spec.device - ) - - # Mel-frequency Log-amplitude spectrogram :: (B, Freq=num_mels, Frame) - melspec = torch.matmul(mel_basis[fmax_dtype_device], spec) - melspec = spectral_normalize_torch(melspec) - return melspec - - -def mel_spectrogram_torch( - y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False -): - """Convert waveform into Mel-frequency Log-amplitude spectrogram. 
- - Args: - y :: (B, T) - Waveforms - Returns: - melspec :: (B, Freq, Frame) - Mel-frequency Log-amplitude spectrogram - """ - # Linear-frequency Linear-amplitude spectrogram :: (B, T) -> (B, Freq, Frame) - spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center) - - # Mel-frequency Log-amplitude spectrogram :: (B, Freq, Frame) -> (B, Freq=num_mels, Frame) - melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax) - - return melspec diff --git a/spaces/shgao/EditAnything/ldm/modules/midas/midas/blocks.py b/spaces/shgao/EditAnything/ldm/modules/midas/midas/blocks.py deleted file mode 100644 index 2145d18fa98060a618536d9a64fe6589e9be4f78..0000000000000000000000000000000000000000 --- a/spaces/shgao/EditAnything/ldm/modules/midas/midas/blocks.py +++ /dev/null @@ -1,342 +0,0 @@ -import torch -import torch.nn as nn - -from .vit import ( - _make_pretrained_vitb_rn50_384, - _make_pretrained_vitl16_384, - _make_pretrained_vitb16_384, - forward_vit, -) - -def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",): - if backbone == "vitl16_384": - pretrained = _make_pretrained_vitl16_384( - use_pretrained, hooks=hooks, use_readout=use_readout - ) - scratch = _make_scratch( - [256, 512, 1024, 1024], features, groups=groups, expand=expand - ) # ViT-L/16 - 85.0% Top1 (backbone) - elif backbone == "vitb_rn50_384": - pretrained = _make_pretrained_vitb_rn50_384( - use_pretrained, - hooks=hooks, - use_vit_only=use_vit_only, - use_readout=use_readout, - ) - scratch = _make_scratch( - [256, 512, 768, 768], features, groups=groups, expand=expand - ) # ViT-H/16 - 85.0% Top1 (backbone) - elif backbone == "vitb16_384": - pretrained = _make_pretrained_vitb16_384( - use_pretrained, hooks=hooks, use_readout=use_readout - ) - scratch = _make_scratch( - [96, 192, 384, 768], features, groups=groups, expand=expand - ) # ViT-B/16 - 84.6% Top1 (backbone) - elif backbone == "resnext101_wsl": - pretrained = _make_pretrained_resnext101_wsl(use_pretrained) - scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # efficientnet_lite3 - elif backbone == "efficientnet_lite3": - pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable) - scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3 - else: - print(f"Backbone '{backbone}' not implemented") - assert False - - return pretrained, scratch - - -def _make_scratch(in_shape, out_shape, groups=1, expand=False): - scratch = nn.Module() - - out_shape1 = out_shape - out_shape2 = out_shape - out_shape3 = out_shape - out_shape4 = out_shape - if expand==True: - out_shape1 = out_shape - out_shape2 = out_shape*2 - out_shape3 = out_shape*4 - out_shape4 = out_shape*8 - - scratch.layer1_rn = nn.Conv2d( - in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups - ) - scratch.layer2_rn = nn.Conv2d( - in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups - ) - scratch.layer3_rn = nn.Conv2d( - in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups - ) - scratch.layer4_rn = nn.Conv2d( - in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups - ) - - return scratch - - -def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False): - efficientnet = torch.hub.load( - "rwightman/gen-efficientnet-pytorch", - 
"tf_efficientnet_lite3", - pretrained=use_pretrained, - exportable=exportable - ) - return _make_efficientnet_backbone(efficientnet) - - -def _make_efficientnet_backbone(effnet): - pretrained = nn.Module() - - pretrained.layer1 = nn.Sequential( - effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2] - ) - pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3]) - pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5]) - pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9]) - - return pretrained - - -def _make_resnet_backbone(resnet): - pretrained = nn.Module() - pretrained.layer1 = nn.Sequential( - resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1 - ) - - pretrained.layer2 = resnet.layer2 - pretrained.layer3 = resnet.layer3 - pretrained.layer4 = resnet.layer4 - - return pretrained - - -def _make_pretrained_resnext101_wsl(use_pretrained): - resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl") - return _make_resnet_backbone(resnet) - - - -class Interpolate(nn.Module): - """Interpolation module. - """ - - def __init__(self, scale_factor, mode, align_corners=False): - """Init. - - Args: - scale_factor (float): scaling - mode (str): interpolation mode - """ - super(Interpolate, self).__init__() - - self.interp = nn.functional.interpolate - self.scale_factor = scale_factor - self.mode = mode - self.align_corners = align_corners - - def forward(self, x): - """Forward pass. - - Args: - x (tensor): input - - Returns: - tensor: interpolated data - """ - - x = self.interp( - x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners - ) - - return x - - -class ResidualConvUnit(nn.Module): - """Residual convolution module. - """ - - def __init__(self, features): - """Init. - - Args: - features (int): number of features - """ - super().__init__() - - self.conv1 = nn.Conv2d( - features, features, kernel_size=3, stride=1, padding=1, bias=True - ) - - self.conv2 = nn.Conv2d( - features, features, kernel_size=3, stride=1, padding=1, bias=True - ) - - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - """Forward pass. - - Args: - x (tensor): input - - Returns: - tensor: output - """ - out = self.relu(x) - out = self.conv1(out) - out = self.relu(out) - out = self.conv2(out) - - return out + x - - -class FeatureFusionBlock(nn.Module): - """Feature fusion block. - """ - - def __init__(self, features): - """Init. - - Args: - features (int): number of features - """ - super(FeatureFusionBlock, self).__init__() - - self.resConfUnit1 = ResidualConvUnit(features) - self.resConfUnit2 = ResidualConvUnit(features) - - def forward(self, *xs): - """Forward pass. - - Returns: - tensor: output - """ - output = xs[0] - - if len(xs) == 2: - output += self.resConfUnit1(xs[1]) - - output = self.resConfUnit2(output) - - output = nn.functional.interpolate( - output, scale_factor=2, mode="bilinear", align_corners=True - ) - - return output - - - - -class ResidualConvUnit_custom(nn.Module): - """Residual convolution module. - """ - - def __init__(self, features, activation, bn): - """Init. 
- - Args: - features (int): number of features - """ - super().__init__() - - self.bn = bn - - self.groups=1 - - self.conv1 = nn.Conv2d( - features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups - ) - - self.conv2 = nn.Conv2d( - features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups - ) - - if self.bn==True: - self.bn1 = nn.BatchNorm2d(features) - self.bn2 = nn.BatchNorm2d(features) - - self.activation = activation - - self.skip_add = nn.quantized.FloatFunctional() - - def forward(self, x): - """Forward pass. - - Args: - x (tensor): input - - Returns: - tensor: output - """ - - out = self.activation(x) - out = self.conv1(out) - if self.bn==True: - out = self.bn1(out) - - out = self.activation(out) - out = self.conv2(out) - if self.bn==True: - out = self.bn2(out) - - if self.groups > 1: - out = self.conv_merge(out) - - return self.skip_add.add(out, x) - - # return out + x - - -class FeatureFusionBlock_custom(nn.Module): - """Feature fusion block. - """ - - def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True): - """Init. - - Args: - features (int): number of features - """ - super(FeatureFusionBlock_custom, self).__init__() - - self.deconv = deconv - self.align_corners = align_corners - - self.groups=1 - - self.expand = expand - out_features = features - if self.expand==True: - out_features = features//2 - - self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1) - - self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn) - self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn) - - self.skip_add = nn.quantized.FloatFunctional() - - def forward(self, *xs): - """Forward pass. 
- - Returns: - tensor: output - """ - output = xs[0] - - if len(xs) == 2: - res = self.resConfUnit1(xs[1]) - output = self.skip_add.add(output, res) - # output += res - - output = self.resConfUnit2(output) - - output = nn.functional.interpolate( - output, scale_factor=2, mode="bilinear", align_corners=self.align_corners - ) - - output = self.out_conv(output) - - return output - diff --git a/spaces/shi-labs/Versatile-Diffusion/lib/model_zoo/optimus.py b/spaces/shi-labs/Versatile-Diffusion/lib/model_zoo/optimus.py deleted file mode 100644 index fa962424f68b79f5d89fbf56b1d825e1ccbcf014..0000000000000000000000000000000000000000 --- a/spaces/shi-labs/Versatile-Diffusion/lib/model_zoo/optimus.py +++ /dev/null @@ -1,763 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -import numpy as np -import numpy.random as npr -import copy - -from lib.model_zoo.common.get_model import get_model, register -from lib.model_zoo.common import utils - -from .optimus_models.tokenization_gpt2 import GPT2Tokenizer - -symbol = 'optimus' - -@register('optimus_vae') -class optimus_vae(nn.Module): - """VAE with normal prior""" - def __init__(self, encoder, decoder, tokenizer_encoder, tokenizer_decoder, args): # - super().__init__() - self.encoder = encoder if isinstance(encoder, nn.Module) else get_model()(encoder) - self.decoder = decoder if isinstance(decoder, nn.Module) else get_model()(decoder) - self.tokenizer_encoder = tokenizer_encoder \ - if isinstance(tokenizer_encoder, nn.Module) \ - else get_model()(tokenizer_encoder, verbose=False) - self.tokenizer_decoder = tokenizer_decoder \ - if isinstance(tokenizer_decoder, nn.Module) \ - else get_model()(tokenizer_decoder, verbose=False) - - gpt2_special_tokens_dict = {'pad_token': '', 'bos_token': '', 'eos_token': ''} - if isinstance(self.tokenizer_encoder, GPT2Tokenizer): - self.tokenizer_encoder.add_special_tokens(gpt2_special_tokens_dict) - if isinstance(self.tokenizer_decoder, GPT2Tokenizer): - self.tokenizer_decoder.add_special_tokens(gpt2_special_tokens_dict) - - self.args = args - self.nz = args.latent_size - - self.eos_token_id = self.tokenizer_decoder.convert_tokens_to_ids( - [self.tokenizer_decoder.eos_token])[0] - self.pad_token_id = self.tokenizer_decoder.convert_tokens_to_ids( - [self.tokenizer_decoder.pad_token])[0] - - # connector: from Bert hidden units to the latent space - # self.linear = nn.Linear(args.nz, 2 * args.nz, bias=False) - - # Standard Normal prior - loc = torch.zeros(self.nz) - scale = torch.ones(self.nz) - self.prior = torch.distributions.normal.Normal(loc, scale) - - def connect(self, bert_fea, nsamples=1): - """ - Returns: Tensor1, Tensor2 - Tensor1: the tensor latent z with shape [batch, nsamples, nz] - Tensor2: the tenor of KL for each x with shape [batch] - """ - - # (batch_size, nz) - - mean, logvar = self.encoder.linear(bert_fea).chunk(2, -1) - # pdb.set_trace() - # mean, logvar = mean.squeeze(0), logvar.squeeze(0) - - # (batch, nsamples, nz) - z = self.reparameterize(mean, logvar, nsamples) - KL = 0.5 * (mean.pow(2) + logvar.exp() - logvar - 1).sum(dim=1) - - return z, KL - - def connect_deterministic(self, bert_fea, nsamples=1): - """ - Returns: Tensor1, Tensor2 - Tensor1: the tensor latent z with shape [batch, nsamples, nz] - Tensor2: the tenor of KL for each x with shape [batch] - """ - - # (batch_size, nz) - - mean, logvar = self.encoder.linear(bert_fea).chunk(2, -1) - # pdb.set_trace() - # mean, logvar = mean.squeeze(0), logvar.squeeze(0) - - logvar.fill_(.0) - # (batch, nsamples, nz) - z = 
self.reparameterize(mean, logvar, nsamples) - KL = 0.5 * (mean.pow(2) + logvar.exp() - logvar - 1).sum(dim=1) - - return z, KL - - def reparameterize(self, mu, logvar, nsamples=1): - """sample from posterior Gaussian family - Args: - mu: Tensor - Mean of gaussian distribution with shape (batch, nz) - logvar: Tensor - logvar of gaussian distibution with shape (batch, nz) - Returns: Tensor - Sampled z with shape (batch, nsamples, nz) - """ - batch_size, nz = mu.size() - std = logvar.mul(0.5).exp() - - mu_expd = mu.unsqueeze(1).expand(batch_size, nsamples, nz) - std_expd = std.unsqueeze(1).expand(batch_size, nsamples, nz) - - eps = torch.zeros_like(std_expd).normal_() - - return mu_expd + torch.mul(eps, std_expd) - - def forward(self, inputs, labels): - - # pdb.set_trace() - - attention_mask=(inputs > 0).float() - # logger.info(inputs) - # logger.info(attention_mask) - # logger.info(labels) - reconstrution_mask=(labels != 50257).float() # 50257 is the padding token for GPT2 - sent_length = torch.sum(reconstrution_mask, dim=1) - - - outputs = self.encoder(inputs, attention_mask) - pooled_hidden_fea = outputs[1] # model outputs are always tuple in pytorch-transformers (see doc) - - if self.args.fb_mode==0: - # Connect hidden feature to the latent space - latent_z, loss_kl = self.connect(pooled_hidden_fea) - latent_z = latent_z.squeeze(1) - - - # Decoding - outputs = self.decoder(input_ids=labels, past=latent_z, labels=labels, label_ignore=self.pad_token_id) - loss_rec = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc) - - elif self.args.fb_mode==1: - # Connect hidden feature to the latent space - mu, logvar = self.encoder.linear(pooled_hidden_fea).chunk(2, -1) - latent_z = self.reparameterize(mu, logvar, nsamples=1) - latent_z = latent_z.squeeze(1) - loss_kl = 0.5 * (mu.pow(2) + logvar.exp() - logvar - 1) - kl_mask = (loss_kl > self.args.dim_target_kl).float() - loss_kl = (kl_mask * loss_kl).sum(dim=1) - - # pdb.set_trace() - # past = self.decoder.linear(latent_z) - # Decoding - outputs = self.decoder(input_ids=labels, past=latent_z, labels=labels, label_ignore=self.pad_token_id) - loss_rec = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc) - - elif self.args.fb_mode==2: - # Connect hidden feature to the latent space - latent_z, loss_kl = self.connect_deterministic(pooled_hidden_fea) - latent_z = latent_z.squeeze(1) - - # past = self.decoder.linear(latent_z) - # Decoding - outputs = self.decoder(input_ids=labels, past=latent_z, labels=labels, label_ignore=self.pad_token_id) - loss_rec = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc) - - - # pdb.set_trace() - if self.args.length_weighted_loss: - loss = loss_rec / sent_length + self.args.beta * loss_kl - else: - loss = loss_rec + self.args.beta * loss_kl - - - return loss_rec, loss_kl, loss - - def encoder_sample(self, bert_fea, nsamples): - """sampling from the encoder - Returns: Tensor1 - Tensor1: the tensor latent z with shape [batch, nsamples, nz] - """ - - # (batch_size, nz) - - mu, logvar = self.encoder.linear(bert_fea).chunk(2, -1) - mu, logvar = mu.squeeze(0), logvar.squeeze(0) - - # (batch, nsamples, nz) - z = self.reparameterize(mu, logvar, nsamples) - - return z, (mu, logvar) - - def encode_stats(self, x): - """ - Returns: Tensor1, Tensor2 - Tensor1: the mean of latent z with shape [batch, nz] - Tensor2: the logvar of latent z with shape [batch, nz] - """ - - return self.encoder.encode_stats(x) - - def decode(self, z, strategy, K=10): - """generate 
samples from z given strategy - Args: - z: [batch, nsamples, nz] - strategy: "beam" or "greedy" or "sample" - K: the beam width parameter - Returns: List1 - List1: a list of decoded word sequence - """ - - if strategy == "beam": - return self.decoder.beam_search_decode(z, K) - elif strategy == "greedy": - return self.decoder.greedy_decode(z) - elif strategy == "sample": - return self.decoder.sample_decode(z) - else: - raise ValueError("the decoding strategy is not supported") - - def reconstruct(self, x, decoding_strategy="greedy", K=5): - """reconstruct from input x - Args: - x: (batch, *) - decoding_strategy: "beam" or "greedy" or "sample" - K: the beam width parameter - Returns: List1 - List1: a list of decoded word sequence - """ - z = self.sample_from_inference(x).squeeze(1) - - return self.decode(z, decoding_strategy, K) - - def log_probability(self, x, z): - """Cross Entropy in the language case - Args: - x: (batch_size, seq_len) - z: (batch_size, n_sample, nz) - Returns: - log_p: (batch_size, n_sample). - log_p(x|z) across different x and z - """ - outputs = self.decoder(input_ids=x, past=z, labels=x, label_ignore=self.pad_token_id) - loss_rec = outputs[0] - return -loss_rec - - def loss_iw(self, x0, x1, nsamples=50, ns=1): - """ - Args: - x: if the data is constant-length, x is the data tensor with - shape (batch, *). Otherwise x is a tuple that contains - the data tensor and length list - Returns: Tensor1, Tensor2, Tensor3 - Tensor1: total loss [batch] - Tensor2: reconstruction loss shape [batch] - Tensor3: KL loss shape [batch] - """ - - # encoding into bert features - bert_fea = self.encoder(x0)[1] - - # (batch_size, nz) - - mu, logvar = self.encoder.linear(bert_fea).chunk(2, -1) - - - ################## - # compute KL - ################## - # pdb.set_trace() - KL = 0.5 * (mu.pow(2) + logvar.exp() - logvar - 1).sum(dim=1) - - # mu, logvar = mu.squeeze(0), logvar.squeeze(0) - ll_tmp, rc_tmp = [], [] - for _ in range(int(nsamples / ns)): - - # (batch, nsamples, nz) - z = self.reparameterize(mu, logvar, ns) - # past = self.decoder.linear(z) - past = z - - # [batch, nsamples] - log_prior = self.eval_prior_dist(z) - log_gen = self.eval_cond_ll(x1, past) - log_infer = self.eval_inference_dist(z, (mu, logvar)) - - # pdb.set_trace() - log_gen = log_gen.unsqueeze(0).contiguous().view(z.shape[0],-1) - - - # pdb.set_trace() - rc_tmp.append(log_gen) - ll_tmp.append(log_gen + log_prior - log_infer) - - - - log_prob_iw = log_sum_exp(torch.cat(ll_tmp, dim=-1), dim=-1) - math.log(nsamples) - log_gen_iw = torch.mean(torch.cat(rc_tmp, dim=-1), dim=-1) - - return log_prob_iw, log_gen_iw , KL - - def nll_iw(self, x0, x1, nsamples, ns=1): - """compute the importance weighting estimate of the log-likelihood - Args: - x0, x1: two different tokenization results of x, where x is the data tensor with shape (batch, *). - nsamples: Int - the number of samples required to estimate marginal data likelihood - Returns: Tensor1 - Tensor1: the estimate of log p(x), shape [batch] - """ - - # compute iw every ns samples to address the memory issue - # nsamples = 500, ns = 100 - # nsamples = 500, ns = 10 - - # TODO: note that x is forwarded twice in self.encoder.sample(x, ns) and self.eval_inference_dist(x, z, param) - #. 
this problem is to be solved in order to speed up - - tmp = [] - for _ in range(int(nsamples / ns)): - # [batch, ns, nz] - - # Chunyuan: - # encoding into bert features - pooled_hidden_fea = self.encoder(x0)[1] - - # param is the parameters required to evaluate q(z|x) - z, param = self.encoder_sample(pooled_hidden_fea, ns) - - # [batch, ns] - log_comp_ll = self.eval_complete_ll(x1, z) - log_infer_ll = self.eval_inference_dist(z, param) - - tmp.append(log_comp_ll - log_infer_ll) - - ll_iw = log_sum_exp(torch.cat(tmp, dim=-1), dim=-1) - math.log(nsamples) - - return ll_iw - - def KL(self, x): - _, KL = self.encode(x, 1) - - return KL - - def eval_prior_dist(self, zrange): - """perform grid search to calculate the true posterior - Args: - zrange: tensor - different z points that will be evaluated, with - shape (k^2, nz), where k=(zmax - zmin)/space - """ - - # (k^2) - return self.prior.log_prob(zrange).sum(dim=-1) - - def eval_complete_ll(self, x, z): - """compute log p(z,x) - Args: - x: Tensor - input with shape [batch, seq_len] - z: Tensor - evaluation points with shape [batch, nsamples, nz] - Returns: Tensor1 - Tensor1: log p(z,x) Tensor with shape [batch, nsamples] - """ - - # [batch, nsamples] - log_prior = self.eval_prior_dist(z) - log_gen = self.eval_cond_ll(x, z) - - return log_prior + log_gen - - def eval_cond_ll(self, x, z): - """compute log p(x|z) - """ - x_shape = list(x.size()) - z_shape = list(z.size()) - if len(z_shape) == 3: - x = x.unsqueeze(1).repeat(1, z_shape[1], 1).contiguous().view(x_shape[0]*z_shape[1], x_shape[-1]) - z = z.contiguous().view(x_shape[0]*z_shape[1], z_shape[-1]) - - return self.log_probability(x, z) - - def eval_log_model_posterior(self, x, grid_z): - """perform grid search to calculate the true posterior - this function computes p(z|x) - Args: - grid_z: tensor - different z points that will be evaluated, with - shape (k^2, nz), where k=(zmax - zmin)/pace - Returns: Tensor - Tensor: the log posterior distribution log p(z|x) with - shape [batch_size, K^2] - """ - try: - batch_size = x.size(0) - except: - batch_size = x[0].size(0) - - # (batch_size, k^2, nz) - grid_z = grid_z.unsqueeze(0).expand(batch_size, *grid_z.size()).contiguous() - - # (batch_size, k^2) - log_comp = self.eval_complete_ll(x, grid_z) - - # normalize to posterior - log_posterior = log_comp - log_sum_exp(log_comp, dim=1, keepdim=True) - - return log_posterior - - def sample_from_inference(self, x, nsamples=1): - """perform sampling from inference net - Returns: Tensor - Tensor: samples from infernece nets with - shape (batch_size, nsamples, nz) - """ - z, _ = self.encoder.sample(x, nsamples) - - return z - - def sample_from_posterior(self, x, nsamples): - """perform MH sampling from model posterior - Returns: Tensor - Tensor: samples from model posterior with - shape (batch_size, nsamples, nz) - """ - - # use the samples from inference net as initial points - # for MCMC sampling. 
[batch_size, nsamples, nz] - cur = self.encoder.sample_from_inference(x, 1) - cur_ll = self.eval_complete_ll(x, cur) - total_iter = self.args.mh_burn_in + nsamples * self.args.mh_thin - samples = [] - for iter_ in range(total_iter): - next = torch.normal(mean=cur, - std=cur.new_full(size=cur.size(), fill_value=self.args.mh_std)) - # [batch_size, 1] - next_ll = self.eval_complete_ll(x, next) - ratio = next_ll - cur_ll - - accept_prob = torch.min(ratio.exp(), ratio.new_ones(ratio.size())) - - uniform_t = accept_prob.new_empty(accept_prob.size()).uniform_() - - # [batch_size, 1] - mask = (uniform_t < accept_prob).float() - mask_ = mask.unsqueeze(2) - - cur = mask_ * next + (1 - mask_) * cur - cur_ll = mask * next_ll + (1 - mask) * cur_ll - - if iter_ >= self.args.mh_burn_in and (iter_ - self.args.mh_burn_in) % self.args.mh_thin == 0: - samples.append(cur.unsqueeze(1)) - - return torch.cat(samples, dim=1) - - def calc_model_posterior_mean(self, x, grid_z): - """compute the mean value of model posterior, i.e. E_{z ~ p(z|x)}[z] - Args: - grid_z: different z points that will be evaluated, with - shape (k^2, nz), where k=(zmax - zmin)/pace - x: [batch, *] - Returns: Tensor1 - Tensor1: the mean value tensor with shape [batch, nz] - """ - - # [batch, K^2] - log_posterior = self.eval_log_model_posterior(x, grid_z) - posterior = log_posterior.exp() - - # [batch, nz] - return torch.mul(posterior.unsqueeze(2), grid_z.unsqueeze(0)).sum(1) - - def calc_infer_mean(self, x): - """ - Returns: Tensor1 - Tensor1: the mean of inference distribution, with shape [batch, nz] - """ - - mean, logvar = self.encoder.forward(x) - - return mean - - def eval_inference_dist(self, z, param): - """this function computes log q(z | x) - Args: - z: tensor - different z points that will be evaluated, with - shape [batch, nsamples, nz] - Returns: Tensor1 - Tensor1: log q(z|x) with shape [batch, nsamples] - """ - - nz = z.size(2) - mu, logvar = param - - # (batch_size, 1, nz) - mu, logvar = mu.unsqueeze(1), logvar.unsqueeze(1) - var = logvar.exp() - - # (batch_size, nsamples, nz) - dev = z - mu - - # (batch_size, nsamples) - log_density = -0.5 * ((dev ** 2) / var).sum(dim=-1) - \ - 0.5 * (nz * math.log(2 * math.pi) + logvar.sum(-1)) - - return log_density - - def calc_mi(self, test_data_batch, args): - # calc_mi_v3 - import math - from modules.utils import log_sum_exp - - mi = 0 - num_examples = 0 - - mu_batch_list, logvar_batch_list = [], [] - neg_entropy = 0. - for batch_data in test_data_batch: - - x0, _, _ = batch_data - x0 = x0.to(args.device) - - # encoding into bert features - bert_fea = self.encoder(x0)[1] - - (batch_size, nz) - mu, logvar = self.encoder.linear(bert_fea).chunk(2, -1) - - x_batch, nz = mu.size() - - #print(x_batch, end=' ') - - num_examples += x_batch - - # E_{q(z|x)}log(q(z|x)) = -0.5*nz*log(2*\pi) - 0.5*(1+logvar).sum(-1) - - neg_entropy += (-0.5 * nz * math.log(2 * math.pi)- 0.5 * (1 + logvar).sum(-1)).sum().item() - mu_batch_list += [mu.cpu()] - logvar_batch_list += [logvar.cpu()] - - pdb.set_trace() - - neg_entropy = neg_entropy / num_examples - ##print() - - num_examples = 0 - log_qz = 0. 
- for i in range(len(mu_batch_list)): - ############### - # get z_samples - ############### - mu, logvar = mu_batch_list[i].cuda(), logvar_batch_list[i].cuda() - - # [z_batch, 1, nz] - - z_samples = self.reparameterize(mu, logvar, 1) - - z_samples = z_samples.view(-1, 1, nz) - num_examples += z_samples.size(0) - - ############### - # compute density - ############### - # [1, x_batch, nz] - #mu, logvar = mu_batch_list[i].cuda(), logvar_batch_list[i].cuda() - #indices = list(np.random.choice(np.arange(len(mu_batch_list)), 10)) + [i] - indices = np.arange(len(mu_batch_list)) - mu = torch.cat([mu_batch_list[_] for _ in indices], dim=0).cuda() - logvar = torch.cat([logvar_batch_list[_] for _ in indices], dim=0).cuda() - x_batch, nz = mu.size() - - mu, logvar = mu.unsqueeze(0), logvar.unsqueeze(0) - var = logvar.exp() - - # (z_batch, x_batch, nz) - dev = z_samples - mu - - # (z_batch, x_batch) - log_density = -0.5 * ((dev ** 2) / var).sum(dim=-1) - \ - 0.5 * (nz * math.log(2 * math.pi) + logvar.sum(-1)) - - # log q(z): aggregate posterior - # [z_batch] - log_qz += (log_sum_exp(log_density, dim=1) - math.log(x_batch)).sum(-1) - - log_qz /= num_examples - mi = neg_entropy - log_qz - - return mi - - def calc_au(self, eval_dataloader, args, delta=0.01): - """compute the number of active units - """ - cnt = 0 - for batch_data in eval_dataloader: - - x0, _, _ = batch_data - x0 = x0.to(args.device) - - # encoding into bert features - bert_fea = self.encoder(x0)[1] - - # (batch_size, nz) - mean, logvar = self.encoder.linear(bert_fea).chunk(2, -1) - - if cnt == 0: - means_sum = mean.sum(dim=0, keepdim=True) - else: - means_sum = means_sum + mean.sum(dim=0, keepdim=True) - cnt += mean.size(0) - - # (1, nz) - mean_mean = means_sum / cnt - - cnt = 0 - for batch_data in eval_dataloader: - - x0, _, _ = batch_data - x0 = x0.to(args.device) - - # encoding into bert features - bert_fea = self.encoder(x0)[1] - - # (batch_size, nz) - mean, _ = self.encoder.linear(bert_fea).chunk(2, -1) - - if cnt == 0: - var_sum = ((mean - mean_mean) ** 2).sum(dim=0) - else: - var_sum = var_sum + ((mean - mean_mean) ** 2).sum(dim=0) - cnt += mean.size(0) - - # (nz) - au_var = var_sum / (cnt - 1) - - return (au_var >= delta).sum().item(), au_var - -from .optimus_models.optimus_bert import BertForLatentConnector_XX - -@register('optimus_bert_connector') -class optimus_bert_connector(BertForLatentConnector_XX): - pass - -from .optimus_models.tokenization_bert import BertTokenizer - -@register('optimus_bert_tokenizer') -class optimus_bert_tokenizer(BertTokenizer): - pass - -from .optimus_models.optimus_gpt2 import GPT2ForLatentConnector_XX - -@register('optimus_gpt2_connector') -class optimus_gpt2_connector(GPT2ForLatentConnector_XX): - pass - -from .optimus_models.tokenization_gpt2 import GPT2Tokenizer - -@register('optimus_gpt2_tokenizer') -class optimus_gpt2_tokenizer(GPT2Tokenizer): - pass - -############################## -# some helpers for inference # -############################## - -def sample_single_sequence_conditional( - model, - context, - past=None, - temperature=1, - top_k=0, - top_p=0.0, - eos_token=50829, - max_length=30, ): - - past = past.unsqueeze(0) - generated = context.unsqueeze(0) - with torch.no_grad(): - while True: - # for _ in trange(length): - inputs = {'input_ids': generated, 'past': past} - outputs = model(**inputs) - next_token_logits = outputs[0][0, -1, :] / temperature - filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p) - next_token = 
torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1) - generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1) - if next_token[0].item() == eos_token: - break - if generated.shape[1] >= max_length: - generated[0, -1] = eos_token - break - return generated.squeeze(0) - -def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')): - """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering - Args: - logits: logits distribution shape (vocabulary size) - top_k > 0: keep only top k tokens with highest probability (top-k filtering). - top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). - Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) - From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 - """ - assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear - top_k = min(top_k, logits.size(-1)) # Safety check - if top_k > 0: - # Remove all tokens with a probability less than the last token of the top-k - indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] - logits[indices_to_remove] = filter_value - - if top_p > 0.0: - sorted_logits, sorted_indices = torch.sort(logits, descending=True) - cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) - - # Remove tokens with cumulative probability above the threshold - sorted_indices_to_remove = cumulative_probs > top_p - # Shift the indices to the right to keep also the first token above the threshold - sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() - sorted_indices_to_remove[..., 0] = 0 - - indices_to_remove = sorted_indices[sorted_indices_to_remove] - logits[indices_to_remove] = filter_value - return logits - -######################## -# compatible to vd 2.0 # -######################## - -@register('optimus_vae_next') -class optimus_vae_next(optimus_vae): - def get_device(self): - return self.encoder.linear.weight.device - - def encode(self, text, max_length=77): - tokenizer = self.tokenizer_encoder - token = [tokenizer.tokenize(sentence.lower()) for sentence in text] - token = [ti[0:max_length] for ti in token] - token_id = [] - for tokeni in token: - token_sentence = [tokenizer._convert_token_to_id(i) for i in tokeni] - token_sentence = tokenizer.add_special_tokens_single_sentence(token_sentence) - token_id.append(torch.LongTensor(token_sentence)) - token_id = torch._C._nn.pad_sequence(token_id, batch_first=True, padding_value=0.0) - token_id = token_id.to(self.get_device()) - z = self.encoder(token_id, attention_mask=(token_id > 0).float())[1] - z_mu, z_logvar = self.encoder.linear(z).chunk(2, -1) - # z_sampled = self.reparameterize(z_mu, z_logvar, 1) - return z_mu.squeeze(1) - - @torch.no_grad() - def decode(self, z, temperature=1.0): - bos_token = self.tokenizer_decoder.encode('') - eos_token = self.tokenizer_decoder.encode('') - context_tokens = torch.LongTensor(bos_token).to(z.device) - sentenses = [] - for zi in z: - out = sample_single_sequence_conditional( - model=self.decoder, - context=context_tokens, - past=zi, temperature=temperature, - top_k=0, top_p=1.0, - max_length=30, - eos_token = eos_token[0],) - text = self.tokenizer_decoder.decode(out.tolist(), clean_up_tokenization_spaces=True) - text = text.split()[1:-1] - text = ' '.join(text) - sentenses.append(text) - return sentenses diff --git a/spaces/shikunl/prismer/prismer/utils.py 
b/spaces/shikunl/prismer/prismer/utils.py deleted file mode 100644 index 3ec6a8353d18abcd91cde155b40e5c5ec826fc6f..0000000000000000000000000000000000000000 --- a/spaces/shikunl/prismer/prismer/utils.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright (c) 2023, NVIDIA Corporation & Affiliates. All rights reserved. -# -# This work is made available under the Nvidia Source Code License-NC. -# To view a copy of this license, visit -# https://github.com/NVlabs/prismer/blob/main/LICENSE - -import math -import numpy as np -from pycocotools.coco import COCO -from pycocoevalcap.eval import COCOEvalCap - - -def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr): - """Decay the learning rate""" - lr = (init_lr - min_lr) * 0.5 * (1. + math.cos(math.pi * epoch / max_epoch)) + min_lr - for param_group in optimizer.param_groups: - param_group['lr'] = lr - - -def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr): - """Warmup the learning rate""" - lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max_step) - for param_group in optimizer.param_groups: - param_group['lr'] = lr - - -def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate): - """Decay the learning rate""" - lr = max(min_lr, init_lr * (decay_rate ** epoch)) - for param_group in optimizer.param_groups: - param_group['lr'] = lr - - -def coco_caption_eval(coco_gt_root, results_file): - coco = COCO(coco_gt_root) - coco_result = coco.loadRes(results_file) - coco_eval = COCOEvalCap(coco, coco_result) - coco_eval.evaluate() - for metric, score in coco_eval.eval.items(): - print(f'{metric}: {score:.3f}') - return coco_eval - - -def create_ade20k_label_colormap(): - """Creates a label colormap used in ADE20K segmentation benchmark. - Returns: - A colormap for visualizing segmentation results. 
- """ - return np.asarray([ - [0, 0, 0], - [120, 120, 120], - [180, 120, 120], - [6, 230, 230], - [80, 50, 50], - [4, 200, 3], - [120, 120, 80], - [140, 140, 140], - [204, 5, 255], - [230, 230, 230], - [4, 250, 7], - [224, 5, 255], - [235, 255, 7], - [150, 5, 61], - [120, 120, 70], - [8, 255, 51], - [255, 6, 82], - [143, 255, 140], - [204, 255, 4], - [255, 51, 7], - [204, 70, 3], - [0, 102, 200], - [61, 230, 250], - [255, 6, 51], - [11, 102, 255], - [255, 7, 71], - [255, 9, 224], - [9, 7, 230], - [220, 220, 220], - [255, 9, 92], - [112, 9, 255], - [8, 255, 214], - [7, 255, 224], - [255, 184, 6], - [10, 255, 71], - [255, 41, 10], - [7, 255, 255], - [224, 255, 8], - [102, 8, 255], - [255, 61, 6], - [255, 194, 7], - [255, 122, 8], - [0, 255, 20], - [255, 8, 41], - [255, 5, 153], - [6, 51, 255], - [235, 12, 255], - [160, 150, 20], - [0, 163, 255], - [140, 140, 140], - [250, 10, 15], - [20, 255, 0], - [31, 255, 0], - [255, 31, 0], - [255, 224, 0], - [153, 255, 0], - [0, 0, 255], - [255, 71, 0], - [0, 235, 255], - [0, 173, 255], - [31, 0, 255], - [11, 200, 200], - [255, 82, 0], - [0, 255, 245], - [0, 61, 255], - [0, 255, 112], - [0, 255, 133], - [255, 0, 0], - [255, 163, 0], - [255, 102, 0], - [194, 255, 0], - [0, 143, 255], - [51, 255, 0], - [0, 82, 255], - [0, 255, 41], - [0, 255, 173], - [10, 0, 255], - [173, 255, 0], - [0, 255, 153], - [255, 92, 0], - [255, 0, 255], - [255, 0, 245], - [255, 0, 102], - [255, 173, 0], - [255, 0, 20], - [255, 184, 184], - [0, 31, 255], - [0, 255, 61], - [0, 71, 255], - [255, 0, 204], - [0, 255, 194], - [0, 255, 82], - [0, 10, 255], - [0, 112, 255], - [51, 0, 255], - [0, 194, 255], - [0, 122, 255], - [0, 255, 163], - [255, 153, 0], - [0, 255, 10], - [255, 112, 0], - [143, 255, 0], - [82, 0, 255], - [163, 255, 0], - [255, 235, 0], - [8, 184, 170], - [133, 0, 255], - [0, 255, 92], - [184, 0, 255], - [255, 0, 31], - [0, 184, 255], - [0, 214, 255], - [255, 0, 112], - [92, 255, 0], - [0, 224, 255], - [112, 224, 255], - [70, 184, 160], - [163, 0, 255], - [153, 0, 255], - [71, 255, 0], - [255, 0, 163], - [255, 204, 0], - [255, 0, 143], - [0, 255, 235], - [133, 255, 0], - [255, 0, 235], - [245, 0, 255], - [255, 0, 122], - [255, 245, 0], - [10, 190, 212], - [214, 255, 0], - [0, 204, 255], - [20, 0, 255], - [255, 255, 0], - [0, 153, 255], - [0, 41, 255], - [0, 255, 204], - [41, 0, 255], - [41, 255, 0], - [173, 0, 255], - [0, 245, 255], - [71, 0, 255], - [122, 0, 255], - [0, 255, 184], - [0, 92, 255], - [184, 255, 0], - [0, 133, 255], - [255, 214, 0], - [25, 194, 194], - [102, 255, 0], - [92, 0, 255], - ]) diff --git a/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/utils/file_client.py b/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/utils/file_client.py deleted file mode 100644 index 7f38d9796da3899048924f2f803d1088927966b0..0000000000000000000000000000000000000000 --- a/spaces/shiwan10000/CodeFormer/CodeFormer/basicsr/utils/file_client.py +++ /dev/null @@ -1,167 +0,0 @@ -# Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py # noqa: E501 -from abc import ABCMeta, abstractmethod - - -class BaseStorageBackend(metaclass=ABCMeta): - """Abstract class of storage backends. - - All backends need to implement two apis: ``get()`` and ``get_text()``. - ``get()`` reads the file as a byte stream and ``get_text()`` reads the file - as texts. - """ - - @abstractmethod - def get(self, filepath): - pass - - @abstractmethod - def get_text(self, filepath): - pass - - -class MemcachedBackend(BaseStorageBackend): - """Memcached storage backend. 
- - Attributes: - server_list_cfg (str): Config file for memcached server list. - client_cfg (str): Config file for memcached client. - sys_path (str | None): Additional path to be appended to `sys.path`. - Default: None. - """ - - def __init__(self, server_list_cfg, client_cfg, sys_path=None): - if sys_path is not None: - import sys - sys.path.append(sys_path) - try: - import mc - except ImportError: - raise ImportError('Please install memcached to enable MemcachedBackend.') - - self.server_list_cfg = server_list_cfg - self.client_cfg = client_cfg - self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg, self.client_cfg) - # mc.pyvector servers as a point which points to a memory cache - self._mc_buffer = mc.pyvector() - - def get(self, filepath): - filepath = str(filepath) - import mc - self._client.Get(filepath, self._mc_buffer) - value_buf = mc.ConvertBuffer(self._mc_buffer) - return value_buf - - def get_text(self, filepath): - raise NotImplementedError - - -class HardDiskBackend(BaseStorageBackend): - """Raw hard disks storage backend.""" - - def get(self, filepath): - filepath = str(filepath) - with open(filepath, 'rb') as f: - value_buf = f.read() - return value_buf - - def get_text(self, filepath): - filepath = str(filepath) - with open(filepath, 'r') as f: - value_buf = f.read() - return value_buf - - -class LmdbBackend(BaseStorageBackend): - """Lmdb storage backend. - - Args: - db_paths (str | list[str]): Lmdb database paths. - client_keys (str | list[str]): Lmdb client keys. Default: 'default'. - readonly (bool, optional): Lmdb environment parameter. If True, - disallow any write operations. Default: True. - lock (bool, optional): Lmdb environment parameter. If False, when - concurrent access occurs, do not lock the database. Default: False. - readahead (bool, optional): Lmdb environment parameter. If False, - disable the OS filesystem readahead mechanism, which may improve - random read performance when a database is larger than RAM. - Default: False. - - Attributes: - db_paths (list): Lmdb database path. - _client (list): A list of several lmdb envs. - """ - - def __init__(self, db_paths, client_keys='default', readonly=True, lock=False, readahead=False, **kwargs): - try: - import lmdb - except ImportError: - raise ImportError('Please install lmdb to enable LmdbBackend.') - - if isinstance(client_keys, str): - client_keys = [client_keys] - - if isinstance(db_paths, list): - self.db_paths = [str(v) for v in db_paths] - elif isinstance(db_paths, str): - self.db_paths = [str(db_paths)] - assert len(client_keys) == len(self.db_paths), ('client_keys and db_paths should have the same length, ' - f'but received {len(client_keys)} and {len(self.db_paths)}.') - - self._client = {} - for client, path in zip(client_keys, self.db_paths): - self._client[client] = lmdb.open(path, readonly=readonly, lock=lock, readahead=readahead, **kwargs) - - def get(self, filepath, client_key): - """Get values according to the filepath from one lmdb named client_key. - - Args: - filepath (str | obj:`Path`): Here, filepath is the lmdb key. - client_key (str): Used for distinguishing differnet lmdb envs. 
- """ - filepath = str(filepath) - assert client_key in self._client, (f'client_key {client_key} is not ' 'in lmdb clients.') - client = self._client[client_key] - with client.begin(write=False) as txn: - value_buf = txn.get(filepath.encode('ascii')) - return value_buf - - def get_text(self, filepath): - raise NotImplementedError - - -class FileClient(object): - """A general file client to access files in different backend. - - The client loads a file or text in a specified backend from its path - and return it as a binary file. it can also register other backend - accessor with a given name and backend class. - - Attributes: - backend (str): The storage backend type. Options are "disk", - "memcached" and "lmdb". - client (:obj:`BaseStorageBackend`): The backend object. - """ - - _backends = { - 'disk': HardDiskBackend, - 'memcached': MemcachedBackend, - 'lmdb': LmdbBackend, - } - - def __init__(self, backend='disk', **kwargs): - if backend not in self._backends: - raise ValueError(f'Backend {backend} is not supported. Currently supported ones' - f' are {list(self._backends.keys())}') - self.backend = backend - self.client = self._backends[backend](**kwargs) - - def get(self, filepath, client_key='default'): - # client_key is used only for lmdb, where different fileclients have - # different lmdb environments. - if self.backend == 'lmdb': - return self.client.get(filepath, client_key) - else: - return self.client.get(filepath) - - def get_text(self, filepath): - return self.client.get_text(filepath) diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bitcoin Address Utility APK Explore the Bitcoin Blockchain with Ease..md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bitcoin Address Utility APK Explore the Bitcoin Blockchain with Ease..md deleted file mode 100644 index 1755bc6c8cc3316c17394fa1decd2e0691bdc12b..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Bitcoin Address Utility APK Explore the Bitcoin Blockchain with Ease..md +++ /dev/null @@ -1,192 +0,0 @@ - -

        Bitcoin Address Utility APK: What Is It and How to Use It?

        -

Bitcoin is a decentralized digital currency that allows you to send and receive payments without intermediaries or censorship. However, to use bitcoin, you need a bitcoin address, which is a unique identifier that represents your bitcoin wallet. A bitcoin address utility is a tool that helps you manage your bitcoin addresses: generating and converting them, verifying their validity and balance, signing transactions, and encrypting the private keys that control them. In this article, we will introduce you to one of the best bitcoin address utilities available for Android devices: the Bitcoin Address Utility APK. We will explain what it is, what features it offers, how to download and install it, and how to use it.

        -

        Introduction

        -

        What is a bitcoin address?

        -

A bitcoin address is a string of alphanumeric characters that starts with either 1, 3, or bc1. It is derived by hashing a public key, which is itself derived from a private key. A private key is a secret number that allows you to spend your bitcoins. A public key is computed from your private key and can be shared with others, because the private key cannot practically be recovered from it. A bitcoin address is like an email address that you can use to receive bitcoins from others.
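To make that derivation chain concrete, here is a minimal Python sketch (not code from the app itself) that turns a random private key into a legacy address. It assumes the third-party ecdsa package is installed and that your local OpenSSL build provides RIPEMD-160.

import hashlib
import secrets

import ecdsa  # assumed dependency: pip install ecdsa

B58_ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"

def base58check(payload: bytes) -> str:
    # Append a 4-byte double-SHA256 checksum, then encode the result in Base58.
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    data = payload + checksum
    n = int.from_bytes(data, "big")
    out = ""
    while n > 0:
        n, rem = divmod(n, 58)
        out = B58_ALPHABET[rem] + out
    # Each leading zero byte is written as the character '1'.
    pad = len(data) - len(data.lstrip(b"\x00"))
    return "1" * pad + out

# Private key: a random 256-bit secret number (the range check against the
# curve order is omitted here for brevity).
private_key = secrets.token_bytes(32)

# Public key: a point on the secp256k1 curve computed from the private key.
verifying_key = ecdsa.SigningKey.from_string(private_key, curve=ecdsa.SECP256k1).get_verifying_key()
public_key = b"\x04" + verifying_key.to_string()  # uncompressed encoding

# Legacy address: version byte 0x00 + RIPEMD160(SHA256(public key)), Base58Check-encoded.
h160 = hashlib.new("ripemd160", hashlib.sha256(public_key).digest()).digest()
print(base58check(b"\x00" + h160))  # prints an address starting with '1'

SegWit (bc1) addresses apply a different encoding (Bech32) to a similar hash, which is why the same private key can back more than one address format.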

        -




        -

        What is a bitcoin address utility?

        -

        A bitcoin address utility is a software application that helps you perform various operations on your bitcoin addresses, such as generating new ones, converting between different formats, verifying their validity and balance, signing transactions, creating and scanning QR codes, and encrypting and decrypting your private keys. A bitcoin address utility can be useful for both beginners and advanced users who want to have more control over their bitcoin wallets.

        -

        Why do you need a bitcoin address utility apk?

        -

        An apk file is an Android application package file that contains all the files and code needed to install and run an app on your Android device. A bitcoin address utility apk is an apk file that contains a bitcoin address utility app. You may need a bitcoin address utility apk if you want to use a bitcoin address utility app that is not available on the Google Play Store, or if you want to install it on a device that does not have access to the Google Play Store. For example, some countries may restrict or ban access to certain apps or websites related to cryptocurrencies. By downloading and installing a bitcoin address utility apk, you can bypass these restrictions and enjoy the benefits of using a bitcoin address utility app.

        -

        Features of Bitcoin Address Utility APK

        -

        Generate and convert bitcoin addresses

        -

        One of the main features of the Bitcoin Address Utility APK is that it allows you to generate new bitcoin addresses or import existing ones from your private keys or public keys. You can also convert between different types of bitcoin addresses, such as legacy (starting with 1), P2SH (starting with 3), or SegWit (starting with bc1). This can be useful if you want to use different types of wallets or services that support different types of addresses.

        -

        Verify and sign transactions

        -

        Another feature of the Bitcoin Address Utility APK is that it allows you to verify the validity and balance of any bitcoin address by checking its transactions on the blockchain. You can also sign transactions with your private keys and broadcast them to the network. This can be useful if you want to send bitcoins from your address to another address, or if you want to verify that you own a certain address.
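Validity checking, at least for legacy addresses, does not require any network access: the last four bytes of the Base58Check payload are a checksum over the rest. Here is a minimal sketch of that check (an illustration, not the app's internals; balance lookups would additionally need a blockchain API or a full node).

import hashlib

B58_ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"

def is_valid_legacy_address(address: str) -> bool:
    # Decode Base58 to an integer, rejecting any character outside the alphabet.
    n = 0
    for ch in address:
        if ch not in B58_ALPHABET:
            return False
        n = n * 58 + B58_ALPHABET.index(ch)
    try:
        raw = n.to_bytes(25, "big")  # version (1) + hash160 (20) + checksum (4)
    except OverflowError:
        return False
    payload, checksum = raw[:-4], raw[-4:]
    return hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4] == checksum

# The genesis-block coinbase address should pass; a mistyped copy should fail.
print(is_valid_legacy_address("1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"))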

        -

        Create and scan QR codes

        -

        A QR code is a two-dimensional barcode that can store information such as text, numbers, or URLs. A QR code can be scanned by a camera or a scanner to access the information. The Bitcoin Address Utility APK allows you to create QR codes for your bitcoin addresses, private keys, public keys, or transactions. You can also scan QR codes from other sources to import or verify them. This can be useful if you want to make payments or receive payments with your bitcoin addresses without typing them manually.
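Payment QR codes are usually just a BIP 21 style URI wrapped in a QR image. A short sketch using the third-party qrcode package (the address and amount below are placeholders, not values tied to the app):

import qrcode  # assumed dependency: pip install "qrcode[pil]"

# A BIP 21 payment URI: the address plus optional parameters such as amount.
uri = "bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa?amount=0.001"

img = qrcode.make(uri)       # returns a PIL image object
img.save("payment_qr.png")   # most wallet apps can scan this image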

        -

        Encrypt and decrypt private keys

        -

        Your private keys are the most important and sensitive part of your bitcoin wallet. If you lose them or expose them to others, you may lose access to your bitcoins or have them stolen. Therefore, it is essential to protect your private keys with encryption. Encryption is a process of transforming data into an unreadable form using a secret key or password. Decryption is the reverse process of restoring the data to its original form using the same key or password. The Bitcoin Address Utility APK allows you to encrypt and decrypt your private keys with a password of your choice. This can be useful if you want to store your private keys securely or share them with others safely.
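As an illustration of the idea (not necessarily the scheme the app uses; wallets often rely on a dedicated standard such as BIP38), a private key can be protected with a password-derived key using the third-party cryptography package:

import base64
import os

from cryptography.fernet import Fernet  # assumed dependency: pip install cryptography
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

def key_from_password(password: str, salt: bytes) -> bytes:
    # Stretch the password into a 32-byte key; Fernet expects it base64-encoded.
    kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=600_000)
    return base64.urlsafe_b64encode(kdf.derive(password.encode()))

salt = os.urandom(16)                      # store the salt alongside the ciphertext
password = "a long, unique passphrase"     # placeholder
secret = b"<private key in hex or WIF>"    # placeholder

token = Fernet(key_from_password(password, salt)).encrypt(secret)
restored = Fernet(key_from_password(password, salt)).decrypt(token)
assert restored == secret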

        -

        How to Download and Install Bitcoin Address Utility APK

        -

        Download the apk file from a trusted source

        -

        The first step to use the Bitcoin Address Utility APK is to download the apk file from a trusted source. You can find the apk file on various websites that offer apk downloads, such as APKPure, APKMirror, or APKMonk. However, you should be careful and avoid downloading apk files from unknown or suspicious sources, as they may contain malware or viruses that can harm your device or steal your data. You should also check the reviews and ratings of the apk file before downloading it, and make sure that it is compatible with your device and Android version.

        -

        Enable unknown sources on your device

        -

        The second step to use the Bitcoin Address Utility APK is to enable unknown sources on your device. Unknown sources are sources that are not verified by Google or the device manufacturer. By default, Android devices do not allow installing apps from unknown sources for security reasons. However, you can enable unknown sources by following these steps:

        -


        -
          -
• Go to Settings > Security > Unknown Sources.
• Toggle on the switch to allow installing apps from unknown sources.
• Tap OK to confirm.
        -

Note that these steps may vary depending on your device model and Android version; on Android 8.0 and later, for example, this is a per-app "Install unknown apps" permission rather than a single global switch. You should also disable unknown sources after installing the app to prevent unwanted installations from other sources.

        -

        Install the apk file and launch the app

        -

        The third step to use the Bitcoin Address Utility APK is to install the apk file and launch the app. To do this, follow these steps:

        -
          -
• Locate the downloaded apk file on your device using a file manager app or a browser.
• Tap on the apk file to start the installation process.
• Tap Install to confirm.
• Wait for the installation to complete.
• Tap Open to launch the app.
        -

        You can also find the app icon on your home screen or app drawer and tap on it to launch the app.

        How to Use Bitcoin Address Utility APK

        -

        Generate a new bitcoin address or import an existing one

        -

        The first step to use the Bitcoin Address Utility APK is to generate a new bitcoin address or import an existing one. To do this, follow these steps:

        -
          -
• Launch the app and tap on the Generate button on the main screen.
• Select the type of address you want to generate: legacy, P2SH, or SegWit.
• Tap on the Generate button again to create a new address with a random private key and public key.
• Alternatively, you can tap on the Import button to import an existing address from your private key or public key. You can enter the key manually or scan it from a QR code.
• You can also tap on the Random button to generate a new address with a random private key and public key.
• You can view your address, private key, public key, and QR code on the screen. You can also copy, share, or save them to your device.
        -

        Convert between different address formats

        -

        The second step to use the Bitcoin Address Utility APK is to convert between different address formats. To do this, follow these steps:

        -
          -
• Launch the app and tap on the Convert button on the main screen.
• Enter the address you want to convert in the input field. You can also scan it from a QR code.
• Select the type of address you want to convert to: legacy, P2SH, or SegWit.
• Tap on the Convert button to see the converted address and its QR code on the screen. You can also copy, share, or save them to your device.
        -

        Verify the validity and balance of an address

        -

        The third step to use the Bitcoin Address Utility APK is to verify the validity and balance of an address. To do this, follow these steps:

        -
          -
• Launch the app and tap on the Verify button on the main screen.
• Enter the address you want to verify in the input field. You can also scan it from a QR code.
• Tap on the Verify button to see if the address is valid and how much balance it has on the blockchain. You can also view its transaction history and details.

        Sign and broadcast transactions to the network

        -

        The fourth step to use the Bitcoin Address Utility APK is to sign and broadcast transactions to the network. To do this, follow these steps:

        -
          -
• Launch the app and tap on the Sign button on the main screen.
• Enter the transaction details in the input fields, such as the sender address, receiver address, amount, fee, and message. You can also scan them from a QR code.
• Tap on the Sign button to sign the transaction with your private key. You can also scan it from a QR code.
• Tap on the Broadcast button to send the transaction to the network. You can also view its status and confirmation on the blockchain.
        -
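Under the hood, the signing step above boils down to producing an ECDSA signature that only the holder of the private key could have made. A minimal sketch of that primitive with the third-party ecdsa package (real Bitcoin transactions are signed over a specific serialized digest, which this example does not reproduce):

import hashlib

import ecdsa  # assumed dependency: pip install ecdsa

signing_key = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1)  # private key
verifying_key = signing_key.get_verifying_key()                 # matching public key

digest = hashlib.sha256(b"transaction data to authorize").digest()
signature = signing_key.sign_digest(digest)

# Anyone with the public key can check the signature without learning the private key.
assert verifying_key.verify_digest(signature, digest)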

        Create and scan QR codes for easy payments

        -

        The fifth step to use the Bitcoin Address Utility APK is to create and scan QR codes for easy payments. To do this, follow these steps:

        -
          -
• Launch the app and tap on the QR button on the main screen.
• Select the type of QR code you want to create: address, private key, public key, or transaction.
• Enter the information you want to encode in the QR code in the input fields. You can also scan it from another QR code.
• Tap on the Generate button to see the QR code on the screen. You can also copy, share, or save it to your device.
• To scan a QR code from another source, tap on the Scan button and point your camera at the QR code. You can also import it from your device.
• You can view the information decoded from the QR code on the screen. You can also copy, share, or save it to your device.
        -

        Encrypt and decrypt your private keys for security

        -

        The sixth step to use the Bitcoin Address Utility APK is to encrypt and decrypt your private keys for security. To do this, follow these steps:

        -
          -
• Launch the app and tap on the Encrypt button on the main screen.
• Enter the private key you want to encrypt in the input field. You can also scan it from a QR code.
• Enter a password of your choice in the input field. You can also use a random password generated by the app.
• Tap on the Encrypt button to see the encrypted private key on the screen. You can also copy, share, or save it to your device.
• To decrypt an encrypted private key, tap on the Decrypt button and enter the encrypted private key and password in the input fields. You can also scan them from a QR code.
• Tap on the Decrypt button to see the decrypted private key on the screen. You can also copy, share, or save it to your device.
        -

        Conclusion

        -

        Summary of the main points

        -

        In conclusion, we have introduced you to one of the best bitcoin address utilities available for Android devices: the Bitcoin Address Utility APK. We have explained what it is, what features it offers, how to download and install it, and how to use it. We have shown you how to generate and convert bitcoin addresses, verify and sign transactions, create and scan QR codes, and encrypt and decrypt private keys using this app. We hope that this article has been helpful and informative for you.

        -

        Call to action and final thoughts

        -

        If you are interested in using this app, you can download it from here . If you have any questions or feedback about this app, you can contact us here . If you want to learn more about bitcoin and cryptocurrencies, you can visit our website here . Thank you for reading this article and happy bitcoin-ing!

        -

        Frequently Asked Questions

        -

        What is Bitcoin Address Utility APK?

        -

        Bitcoin Address Utility APK is an Android application that helps you manage your bitcoin addresses, such as generating, converting, verifying, signing, and encrypting them.

        -

        How do I download and install Bitcoin Address Utility APK?

        -

        You can download and install Bitcoin Address Utility APK by following these steps:

        -
          -
1. Download the apk file from a trusted source.
2. Enable unknown sources on your device by going to Settings > Security > Unknown Sources.
3. Install the apk file by tapping on it and confirming.
4. Launch the app by tapping on its icon or opening it from your app drawer.
        -

        How do I use Bitcoin Address Utility APK?

        -

        You can use Bitcoin Address Utility APK by following these steps:

          -
1. Generate a new bitcoin address or import an existing one by tapping on the Generate or Import button.
2. Convert between different address formats by tapping on the Convert button and selecting the type of address you want.
3. Verify the validity and balance of an address by tapping on the Verify button and entering or scanning the address.
4. Sign and broadcast transactions to the network by tapping on the Sign button and entering or scanning the transaction details and private key.
5. Create and scan QR codes for easy payments by tapping on the QR button and selecting the type of QR code you want to create or scan.
6. Encrypt and decrypt your private keys for security by tapping on the Encrypt or Decrypt button and entering or scanning the private key and password.

        -

        Is Bitcoin Address Utility APK safe and reliable?

        -

        Bitcoin Address Utility APK is safe and reliable as long as you download it from a trusted source and follow the security precautions. You should always check the reviews and ratings of the apk file before downloading it, and make sure that it is compatible with your device and Android version. You should also enable unknown sources only when installing the app, and disable them afterwards. You should also protect your private keys with encryption and password, and never share them with anyone. You should also backup your private keys and addresses in case you lose your device or delete the app.

        -

        What are the benefits of using Bitcoin Address Utility APK?

        -

        The benefits of using Bitcoin Address Utility APK are that it allows you to:

        -
          -
        • Manage your bitcoin addresses easily and conveniently.
        • Convert between different address formats to use different wallets or services.
        • Verify the validity and balance of any bitcoin address on the blockchain.
        • Sign and broadcast transactions to the network without relying on third parties.
        • Create and scan QR codes for easy payments without typing manually (see the sketch after this list).
        • Encrypt and decrypt your private keys for security and privacy.
        -
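        Payment QR codes like the ones mentioned in this list typically just encode a BIP 21 "bitcoin:" URI. As an illustration of the idea (not the app's own code), the third-party qrcode package can produce one in a few lines; the address and amount below are examples only:

```python
import qrcode  # third-party package: pip install "qrcode[pil]"

# A BIP 21 payment URI: address plus optional amount and label.
address = "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"  # example address only
uri = f"bitcoin:{address}?amount=0.001&label=Invoice%2042"

qrcode.make(uri).save("payment_request.png")  # scan this with any wallet app
```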

        What are the drawbacks of using Bitcoin Address Utility APK?

        -

        The drawbacks of using Bitcoin Address Utility APK are that it may:

        -
          -
        • Not be available on the Google Play Store or other official app stores.
        • Require enabling unknown sources on your device, which may expose you to potential risks.
        • Not be updated regularly or supported by the developer.
        • Not be compatible with some devices or Android versions.
        • Not have a user-friendly interface or design.
        -

        What are some alternatives to Bitcoin Address Utility APK?

        -

        Some alternatives to Bitcoin Address Utility APK are:

        -
          -
        • Bitcoin Wallet: A simple and secure bitcoin wallet app that allows you to send and receive bitcoins, view transactions, create backups, and more.
        • BitPay: A bitcoin payment app that allows you to buy, store, and spend bitcoins, convert them to local currencies, manage multiple wallets, and more.
        • Blockchain.com Wallet: A popular bitcoin wallet app that allows you to send, receive, and store bitcoins, access global markets, exchange cryptocurrencies, earn rewards, and more.

        -
        -
        \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download FINAL FANTASY IV THE AFTER YEARS APK for Android - The 3D Remake of the Classic RPG.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download FINAL FANTASY IV THE AFTER YEARS APK for Android - The 3D Remake of the Classic RPG.md deleted file mode 100644 index e30a133395aeba606590f2a8b332d6bc1cef5dde..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download FINAL FANTASY IV THE AFTER YEARS APK for Android - The 3D Remake of the Classic RPG.md +++ /dev/null @@ -1,116 +0,0 @@ -
        -

        Final Fantasy IV: The After Years APK - A Classic RPG Sequel for Android Devices


        If you are a fan of classic role-playing games, you might have heard of Final Fantasy IV, one of the most popular and influential titles in the genre. Released in 1991 for the Super Nintendo Entertainment System, Final Fantasy IV introduced many innovations and features that would become staples of the series, such as the Active Time Battle system, the theme of love, and a memorable cast of characters.

        -




        -

        But did you know that there is a sequel to Final Fantasy IV that continues the story of Cecil, Rosa, Rydia, Kain, and other beloved characters? And did you know that you can play this sequel on your Android device with the Final Fantasy IV: The After Years APK?

        -

        In this article, we will tell you everything you need to know about Final Fantasy IV: The After Years APK, including what it is, how to download and install it, and why you should play it. Read on to find out more!

        -

        What is Final Fantasy IV: The After Years?

        -

        Final Fantasy IV: The After Years is a direct sequel to Final Fantasy IV that was originally released in 2008 as a series of episodic downloads for mobile phones in Japan. It was later ported to other platforms, such as WiiWare, PlayStation Portable, iOS, and Android. The Android version was released in 2013 and features enhanced graphics, music, and gameplay.

        -

        final fantasy iv after years android download
        -final fantasy 4 the after years 3d remake apk
        -final fantasy iv the after years full game apk
        -final fantasy 4 after years mod apk
        -final fantasy iv after years free download for android
        -final fantasy 4 the after years apk + data
        -final fantasy iv the after years apk obb
        -final fantasy 4 after years walkthrough android
        -final fantasy iv after years cheats android
        -final fantasy 4 the after years best party
        -final fantasy iv the after years characters
        -final fantasy 4 after years lunar whale
        -final fantasy iv the after years review
        -final fantasy 4 the after years steam
        -final fantasy iv the after years psp iso
        -final fantasy 4 after years ds rom
        -final fantasy iv the after years wii
        -final fantasy 4 the after years ios
        -final fantasy iv the after years switch
        -final fantasy 4 the after years pc download
        -final fantasy iv the after years gba rom hack
        -final fantasy 4 the after years how to level up fast
        -final fantasy iv the after years moon phases
        -final fantasy 4 the after years band list
        -final fantasy iv the after years ost download
        -final fantasy 4 the after years rydia's tale walkthrough
        -final fantasy iv the after years edge's tale walkthrough
        -final fantasy 4 the after years kain's tale walkthrough
        -final fantasy iv the after years porom's tale walkthrough
        -final fantasy 4 the after years yang's tale walkthrough
        -final fantasy iv the after years palom's tale walkthrough
        -final fantasy 4 the after years edward's tale walkthrough
        -final fantasy iv the after years ceodore's tale walkthrough
        -final fantasy 4 the after years lunarian's tale walkthrough
        -final fantasy iv the after years interlude walkthrough
        -final fantasy 4 the after years challenge dungeon guide
        -final fantasy iv the after years best equipment guide
        -final fantasy 4 the after years secret boss guide
        -final fantasy iv the after years eidolon guide
        -final fantasy 4 the after years augment guide
        -final fantasy iv the after years gil farming guide
        -final fantasy 4 the after years item list guide
        -final fantasy iv the after years magic list guide
        -final fantasy 4 the after years enemy list guide
        -final fantasy iv the after years map guide

        -

        Final Fantasy IV: The After Years is set 17 years after the events of Final Fantasy IV and follows the adventures of Ceodore, the son of Cecil and Rosa, as well as other characters from the original game and some new ones. The game features a similar gameplay system to Final Fantasy IV, but with some additions and modifications.

        -

        The story of Final Fantasy IV: The After Years

        -

        The story of Final Fantasy IV: The After Years is divided into 10 chapters, each focusing on a different character or group of characters. The first chapter is called "The Prologue" and introduces Ceodore, who is undergoing a trial to become a member of the Red Wings, the elite air force of Baron. However, his mission is interrupted by a mysterious girl who attacks Baron with a horde of monsters. Ceodore then teams up with Biggs and Wedge, two loyal soldiers of Baron, to find out what is going on.

        -

        The other chapters are called "Ceodore's Tale", "Rydia's Tale", "Yang's Tale", "Palom's Tale", "Edge's Tale", "Porom's Tale", "Edward's Tale", "Kain's Tale", and "The Lunarians' Tale". Each chapter explores the fate of the characters from Final Fantasy IV and how they are affected by the appearance of a second moon in the sky. Along the way, they encounter new enemies, allies, and mysteries.

        -

        The final chapter is called "The Crystals" and brings together all the characters from the previous chapters to face the ultimate threat to the world. It also reveals the true identity of the mysterious girl and her connection to Golbez, Cecil's brother and former antagonist.

        -

        The gameplay of Final Fantasy IV: The After Years

        -

        The gameplay of Final Fantasy IV: The After Years is largely similar to that of Final Fantasy IV, but with some new features and tweaks. Here are some of the main aspects of the gameplay:

        -

        Active Time Battle system

        -

        The Active Time Battle (ATB) system is a trademark feature of the Final Fantasy series that allows the player to control multiple characters in a turn-based combat system. Each character has a gauge that fills up over time, and when it is full, they can perform an action, such as attacking, using an item, casting a spell, or using a special ability. The player can also change the battle speed and the order of the characters in the menu.

        -

        Moon Phase system

        -

        The Moon Phase system is a new feature that affects the gameplay in various ways. The game has a lunar cycle that changes every few days, and each phase of the moon has different effects on the characters and the enemies. For example, when the moon is full, physical attacks are stronger, but magic attacks are weaker. When the moon is new, magic attacks are stronger, but physical attacks are weaker. The moon phase also affects some of the enemies' behaviors and abilities, as well as some of the events and items in the game.

        -

        Band system

        -

        The Band system is another new feature that allows the characters to combine their abilities and perform powerful attacks together. To use a Band, the player has to select the "Band" option in the battle menu and then choose two or more characters who have compatible abilities. The characters will then execute a coordinated attack that consumes their MP and deals massive damage to the enemies. There are over 70 different Bands in the game, and some of them require specific conditions or events to be unlocked.

        -

        How to download and install Final Fantasy IV: The After Years APK?

        -

        If you want to play Final Fantasy IV: The After Years on your Android device, you will need to download and install the APK file of the game. An APK file is a package file that contains all the data and resources of an Android application. You can download the Final Fantasy IV: The After Years APK file from various sources online, but make sure you choose a reliable and safe one.

        -

        Before you download and install the Final Fantasy IV: The After Years APK file, you will need to do some preparations on your device. Here are the steps you need to follow:

        -

        Requirements for Final Fantasy IV: The After Years APK

        -

        To play Final Fantasy IV: The After Years on your Android device, you will need to meet some minimum requirements. Here are the requirements for Final Fantasy IV: The After Years APK:

        | Requirement | Description |
        | --- | --- |
        | Android version | 2.3 or higher |
        | Storage space | At least 450 MB of free space |
        | RAM | At least 512 MB of RAM |
        | Internet connection | Required for downloading additional data |

        Steps to download and install Final Fantasy IV: The After Years APK

        -

        Once you have met the requirements for Final Fantasy IV: The After Years APK, you can proceed to download and install it on your device. Here are the steps you need to follow:

        -
          -
        1. Enable unknown sources on your device. This will allow you to install applications from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown sources and toggle it on.
        2. Download the Final Fantasy IV: The After Years APK file from a trusted source, using your browser or a file manager app, and save it somewhere you can easily find later.
        3. Locate the APK file on your device and tap on it to start the installation. If a warning message appears asking you to confirm, tap "Install" to proceed (or sideload the file from a computer — see the sketch after this list).
        4. Wait for the installation to finish. It may take a few minutes depending on your device and internet speed.
        5. Launch the game from your app drawer or home screen. If you are asked to download additional data for the game, tap "OK" to start the download.
        6. Enjoy playing Final Fantasy IV: The After Years on your Android device!
        -
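        If the phone is connected to a computer with USB debugging enabled, the same installation can be done over adb instead of tapping through the steps above. A minimal sketch — the file name is a placeholder, and it assumes the Android platform-tools (adb) are installed on the computer:

```python
import subprocess

def sideload_apk(apk_path: str) -> None:
    """Install an APK over adb; the -r flag replaces an existing install."""
    subprocess.run(["adb", "install", "-r", apk_path], check=True)

sideload_apk("ffiv_the_after_years.apk")  # placeholder file name
```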

        Why should you play Final Fantasy IV: The After Years APK?

        -

        Final Fantasy IV: The After Years is a game that will appeal to both fans of Final Fantasy IV and newcomers alike. It offers a rich and engaging story that expands on the original game's lore and characters, as well as a challenging and fun gameplay system that adds new elements and mechanics. Here are some of the pros and cons of playing Final Fantasy IV: The After Years APK:

        -

        Pros of Final Fantasy IV: The After Years APK

        -
          -
        • You can experience a classic RPG sequel that continues the story of one of the most beloved games in the genre.
        • You can explore a vast world with different locations, dungeons, secrets, and side quests.
        • You can control over 20 different characters, each with their own personality, backstory, and skills.
        • You can customize your party and strategy with the Active Time Battle, Moon Phase, and Band systems.
        • You can enjoy the improved graphics, music, and sound effects of the Android version.
        • You can play the game offline without any internet connection.
        -

        Cons of Final Fantasy IV: The After Years APK

        -
          -
        • You may find the game too difficult or frustrating at some points, especially if you are not familiar with the original game or the genre.
        • You may encounter some bugs or glitches that affect the performance or functionality of the game.
        • You may not like the episodic structure of the game, which requires you to switch between different characters and scenarios frequently.
        • You may not enjoy the retro style of the game, which may seem outdated or simplistic compared to modern games.
        -

        Conclusion

        -

        Final Fantasy IV: The After Years APK is a great way to experience a classic RPG sequel that offers a compelling story, a diverse cast of characters, and a challenging and fun gameplay system. Whether you are a fan of Final Fantasy IV or a newcomer to the series, you will find something to enjoy in this game. If you are looking for a nostalgic and immersive adventure on your Android device, you should definitely give Final Fantasy IV: The After Years APK a try!

        -

        FAQs

        -

        Here are some of the frequently asked questions about Final Fantasy IV: The After Years APK:

        -
          -
        1. Is Final Fantasy IV: The After Years APK free?
        2. -

          No, Final Fantasy IV: The After Years APK is not free. You will need to pay a one-time fee of $15.99 to download and install the game on your device. However, this fee includes all the chapters and content of the game, so you will not need to pay for any additional in-app purchases or subscriptions.

          -
        3. Is Final Fantasy IV: The After Years APK safe?
        4. -

          Yes, Final Fantasy IV: The After Years APK is safe, as long as you download it from a trusted and reliable source. However, you should always be careful when downloading and installing any APK file on your device, as some sources may contain malicious or harmful software that can damage your device or compromise your privacy. You should also scan the APK file with an antivirus app before installing it.

          -
        5. Do I need to play Final Fantasy IV before playing Final Fantasy IV: The After Years APK?
        6. -

          No, you do not need to play Final Fantasy IV before playing Final Fantasy IV: The After Years APK. The game has a prologue that summarizes the events and characters of the original game, as well as tutorials and hints that explain the gameplay mechanics and systems. However, playing Final Fantasy IV before playing Final Fantasy IV: The After Years APK will enhance your enjoyment and understanding of the game, as you will be more familiar with the world, the story, and the characters.

          -
        7. How long is Final Fantasy IV: The After Years APK?
        8. -

          The length of Final Fantasy IV: The After Years APK depends on your playstyle and preferences. However, according to some estimates, the game can take anywhere from 20 to 40 hours to complete, depending on how much you explore, do side quests, and replay chapters. The game also has multiple endings and secrets that can add replay value and increase the length of the game.

          -
        9. Can I play Final Fantasy IV: The After Years APK on other devices?
        10. -

          Yes, you can play Final Fantasy IV: The After Years APK on other devices besides Android. The game is also available for iOS, PlayStation Portable, WiiWare, Windows PC, and Nintendo Switch. However, you will need to purchase the game separately for each platform, as there is no cross-platform compatibility or transfer option.

          -

        401be4b1e0
        -
        -
        \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Harry Potter Magic Awakened APK and Experience the Magical World.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Harry Potter Magic Awakened APK and Experience the Magical World.md deleted file mode 100644 index dfc29270567aacd7a0abd747a210874862749e04..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Harry Potter Magic Awakened APK and Experience the Magical World.md +++ /dev/null @@ -1,104 +0,0 @@ - -

        Harry Potter: Magic Awakened - A New Wizarding Adventure for Android

        -

        Are you a fan of the Harry Potter series? Have you ever dreamed of attending Hogwarts, learning magic, and dueling with other witches and wizards? If so, you will be delighted to know that there is a new game that lets you experience all that and more. It's called Harry Potter: Magic Awakened, and it's coming soon to your Android device.

        -




        -

        Harry Potter: Magic Awakened is a collectible card (CCG) and massively multiplayer (MMO) wizarding dueling game featuring a blend of strategy roleplay (RPG) elements. Players will take on the role of a first-year student at Hogwarts, joining thousands of other witches and wizards in a quest to master their magic. The game is described as a card collection game (CCG) and role-playing game (RPG) hybrid, fusing multiple genres and elements together. It offers an immersive role-playing journey with deep strategy play that fulfills the fan fantasy.

        -

        In this article, we will tell you everything you need to know about Harry Potter: Magic Awakened, including what it is, how to play it, how to download it, and what are its features and benefits. Let's get started!

        -

        What is Harry Potter: Magic Awakened?

        -

        Harry Potter: Magic Awakened is a new game that is based on the popular Harry Potter franchise created by J.K. Rowling. It is developed by NetEase Games and published by Warner Bros Games under the Portkey Games label. Portkey Games covers all games taking place in the Wizarding World that are published by WB Games. Other games published under Portkey Games include Harry Potter: Hogwarts Mystery, as well as the upcoming action role-playing game Hogwarts Legacy.

        -

        A card collection and role-playing game set in the wizarding world

        -

        Harry Potter: Magic Awakened is a game that combines card collection and role-playing elements in an innovative way. Players will collect and level up cards that represent spells, charms, potions, artifacts, and magical creatures. They will use these cards to cast magic and summon allies in battles against various enemies and challenges. The game uses stylized art direction, so instead of aiming to look realistic, it captures an otherworldly side of Hogwarts and its denizens.

        -

        harry potter magic awakened download android
        -harry potter magic awakened apk mod
        -harry potter magic awakened release date global
        -harry potter magic awakened gameplay
        -harry potter magic awakened official website
        -harry potter magic awakened free download
        -harry potter magic awakened apk obb
        -harry potter magic awakened review
        -harry potter magic awakened trailer
        -harry potter magic awakened reddit
        -harry potter magic awakened ios
        -harry potter magic awakened beta apk
        -harry potter magic awakened system requirements
        -harry potter magic awakened netease
        -harry potter magic awakened wiki
        -harry potter magic awakened characters
        -harry potter magic awakened pc
        -harry potter magic awakened online
        -harry potter magic awakened tips and tricks
        -harry potter magic awakened cheats
        -harry potter magic awakened hack apk
        -harry potter magic awakened play store
        -harry potter magic awakened registration
        -harry potter magic awakened discord
        -harry potter magic awakened update
        -harry potter magic awakened quidditch
        -harry potter magic awakened hogwarts mystery
        -harry potter magic awakened english version apk
        -harry potter magic awakened how to install
        -harry potter magic awakened best class
        -harry potter magic awakened card game
        -harry potter magic awakened emulator
        -harry potter magic awakened facebook
        -harry potter magic awakened guide
        -harry potter magic awakened news
        -harry potter magic awakened rpg
        -harry potter magic awakened spells
        -harry potter magic awakened twitter
        -harry potter magic awakened wallpaper
        -harry potter magic awakened youtube

        -

        An original story that takes place 10 years after the Battle of Hogwarts

        -

        Harry Potter: Magic Awakened is not a direct adaptation of the books or movies, but rather an original story that takes place 10 years after the Battle of Hogwarts. Players will create their own character and enroll at Hogwarts as a first-year student. They will encounter beloved characters like Harry, Hermione, Ron, Hagrid, Dumbledore, Snape, and more, as well as new adventure companions. They will also face new enemies, mysteries, and challenges along the way.

        A game published by WB Games under the Portkey Games label

        -

        Harry Potter: Magic Awakened is one of the games that are published by WB Games under the Portkey Games label. Portkey Games is a label dedicated to creating new games inspired by the Wizarding World of Harry Potter. These games allow players to create their own stories and adventures in the magical universe, while also featuring familiar characters and locations from the books and movies. Some of the other games under Portkey Games include Harry Potter: Hogwarts Mystery, Harry Potter: Puzzles & Spells, and the upcoming Hogwarts Legacy.

        -

        How to play Harry Potter: Magic Awakened?

        -

        Harry Potter: Magic Awakened is a game that is easy to learn but hard to master. It has a variety of gameplay modes and features that will keep you entertained and challenged. Here are some of the basics of how to play the game.

        -

        Create and customize your own witch or wizard

        -

        The first thing you need to do in Harry Potter: Magic Awakened is to create your own character. You can choose your gender, appearance, name, and house affiliation. You can also customize your outfit, wand, pet, and dormitory. Your character will be your avatar in the game, and you can change their appearance anytime you want.

        -

        Collect and level up cards to cast spells and summon creatures

        -

        The core of Harry Potter: Magic Awakened is the card collection and combat system. You will collect cards that represent different spells, charms, potions, artifacts, and magical creatures. You can use these cards to cast magic and summon allies in battles against various enemies and challenges. Each card has its own attributes, effects, and costs. You can level up your cards by using them in battles or by combining them with other cards. You can also create your own decks of cards to suit your playstyle and strategy.

        -

        Duel in thrilling multiplayer, real-time matches

        -

        One of the most exciting features of Harry Potter: Magic Awakened is the multiplayer mode. You can duel with other players from around the world in real-time matches. You can choose from different modes, such as ranked matches, casual matches, team matches, or friendly matches. You can also join or create clubs with other players to chat, share tips, and challenge each other. Duelling with other players will test your skills, strategy, and creativity.

        -

        Explore and socialize with other wizards at Hogwarts and beyond

        -

        Harry Potter: Magic Awakened is not just about duelling. It is also about exploring and socializing with other wizards at Hogwarts and beyond. You can visit various locations from the Harry Potter universe, such as the Great Hall, the Forbidden Forest, Diagon Alley, Hogsmeade, and more. You can interact with familiar characters from the books and movies, such as Harry, Hermione, Ron, Hagrid, Dumbledore, Snape, and more. You can also make new friends and adventure companions along the way. You can chat with other players using text or voice messages. You can also participate in various events and activities, such as Quidditch matches, club activities, seasonal festivals, and more.

        -

        How to download Harry Potter: Magic Awakened global apk?

        -

        If you are eager to play Harry Potter: Magic Awakened on your Android device, you might be wondering how to download it. Here are some steps to help you get started.

        -

        The game is set to launch globally on June 27, 2023

        -

        The good news is that Harry Potter: Magic Awakened is set to launch globally on June 27, 2023. This means that you don't have to wait too long to play it. The game will be available on iOS via the App Store and Android via the Google Play Store. It will also be available on PC via a dedicated website.

        -

        The game will be available on iOS, Android, and PC platforms

        -

        Harry Potter: Magic Awakened will be available on iOS via the App Store and Android via the Google Play Store. It will also be available on PC via a dedicated website. The game will support cross-platform play, meaning that you can play with other players regardless of their device type.

        -

        The game can be downloaded from the official website or the Google Play Store

        -

        To download Harry Potter: Magic Awakened on your Android device, you have two options. You can either download it from the official website or from the Google Play Store.

        -
          -
        • To download it from the official website, visit https://www.hpmagicawakened.com/ in your browser and follow the instructions to download and install the game. You might need to enable the installation of apps from unknown sources in your device settings.
        • To download it from the Google Play Store, visit https://play.google.com/store/apps/details?id=com.wb.goog.hpmagicawakened in your browser, or open the Google Play Store app on your device and search for Harry Potter: Magic Awakened. Then tap the Install button and wait for the game to download and install.
        -

        Either way, you will need to have a stable internet connection and enough storage space on your device to download and play the game. You will also need to register an account and agree to the terms of service and privacy policy of the game.

        -

        What are the features and benefits of Harry Potter: Magic Awakened?

        -

        Harry Potter: Magic Awakened is a game that offers many features and benefits for players who love the Harry Potter franchise and card collection games. Here are some of the main ones:

        -

        A stunning and immersive wizarding world with stylized graphics and sound effects

        -

        Harry Potter: Magic Awakened is a game that will make you feel like you are part of the wizarding world. The game features stylized graphics that capture the essence and charm of the Harry Potter universe, with detailed environments, characters, and animations. The game also features original sound effects and music that enhance the atmosphere and mood of the game. You will hear familiar voices, spells, creatures, and themes from the books and movies, as well as new ones created for the game.

        -

        A rich and diverse gameplay with both PvE and PvP modes

        -

        Harry Potter: Magic Awakened is a game that will keep you entertained and challenged with its rich and diverse gameplay. The game has both PvE (player versus environment) and PvP (player versus player) modes, offering different levels of difficulty, rewards, and fun. In PvE mode, you can follow the main story line, complete various quests, explore different locations, and encounter various enemies and challenges. In PvP mode, you can duel with other players from around the world in real-time matches, test your skills and strategy, and climb the leaderboards.

        -

        A faithful and engaging adaptation of the Harry Potter franchise with familiar characters and locations

        -

        Harry Potter: Magic Awakened is a game that will appeal to fans of the Harry Potter franchise, as it is a faithful and engaging adaptation of the books and movies. The game features familiar characters and locations from the Harry Potter universe, such as Harry, Hermione, Ron, Hagrid, Dumbledore, Snape, Voldemort, Hogwarts, Diagon Alley, Hogsmeade, and more. The game also features an original story that takes place 10 years after the Battle of Hogwarts, with new adventure companions and enemies. The game respects the canon and lore of the Harry Potter franchise, while also adding new elements and twists.

        -

        Conclusion

        -

        Harry Potter: Magic Awakened is a game that will delight fans of the Harry Potter franchise and card collection games alike. It is a game that combines card collection and role-playing elements in an innovative way, offering an immersive role-playing journey with deep strategy play that fulfills the fan fantasy. It is a game that features a stunning and immersive wizarding world with stylized graphics and sound effects, a rich and diverse gameplay with both PvE and PvP modes, and a faithful and engaging adaptation of the Harry Potter franchise with familiar characters and locations. It is a game that will be available on iOS, Android, and PC platforms, and can be downloaded from the official website or the Google Play Store. It is a game that is set to launch globally on June 27, 2023.

        -

        If you are a fan of the Harry Potter series and card collection games, you should definitely check out Harry Potter: Magic Awakened. It is a game that will let you experience the magic of the wizarding world like never before. It is a game that will make you feel like you are part of the story, and not just a spectator. It is a game that will challenge your mind, and not just your fingers. It is a game that will bring you joy, and not just entertainment.

        -

        Are you ready to awaken your magic? Download Harry Potter: Magic Awakened today and join the adventure!

        -

        FAQs

        -

        Here are some of the frequently asked questions about Harry Potter: Magic Awakened.

        Q: Is Harry Potter: Magic Awakened free to play?
        A: Yes, it is free to download and play. However, the game may offer some optional in-game purchases that can enhance your gameplay experience.

        Q: What are the system requirements for Harry Potter: Magic Awakened?
        A: The minimum system requirements are as follows:

        • Android: Android 5.0 or higher, 2 GB RAM or higher, 4 GB of free storage space or higher
        • iOS: iOS 10.0 or higher, iPhone 6S or higher, iPad Air 2 or higher, iPad Mini 4 or higher
        • PC: Windows 7 or higher, Intel Core i3 or higher, 4 GB RAM or higher, NVIDIA GeForce GT 730 or higher

        Q: How can I contact the customer service of Harry Potter: Magic Awakened?
        A: You can visit https://www.hpmagicawakened.com/support in your browser, or tap the Settings icon in the game and select Customer Service. You can also email hpmagicawakened@wb.com.

        Q: How can I get more cards in Harry Potter: Magic Awakened?
        A: You can get more cards by completing quests, participating in events, dueling with other players, opening chests, and purchasing card packs.

        Q: How can I join a club in Harry Potter: Magic Awakened?
        A: Tap the Club icon in the game and select Join Club. You can search for an existing club by name or ID, or browse the recommended clubs. You can also create your own club by tapping Create Club and paying a certain amount of gold.

        -
        -
        \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Music at Mdundo The Best Source of Free African Music.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Music at Mdundo The Best Source of Free African Music.md deleted file mode 100644 index ffa0ac4a9b93355c5e16b5c713e02f05d9087f3c..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Music at Mdundo The Best Source of Free African Music.md +++ /dev/null @@ -1,81 +0,0 @@ -
        -

        Download Music at Mdundo

        -

        If you are a fan of African music, you might have heard of Mdundo, a popular music platform that allows you to download and listen to your favourite songs from various artists across the continent. But what is Mdundo exactly and how can you use it to enjoy your music? In this article, we will answer these questions and more, and show you why Mdundo is the best choice for downloading music online.

        -




        -

        What is Mdundo?

        -

        A brief introduction to Mdundo and its features

        -

        Mdundo is a one-stop solution for all your music needs. It is a music app and website that offers you free, unlimited access to all your favourite songs from Tanzania, Uganda, Kenya, Nigeria, Zambia, Ghana, and more. You can listen to your favourite artists right on your phone or computer, wherever you are and whatever you are doing. Mdundo has some amazing features that make it stand out from other music platforms, such as:

        -
          -
        • High quality MP3 downloads of your favourite African artists
        • -
        • Hundreds of playlists created by experts
        • -
        • Trending charts for Kenya, Uganda, and Tanzania
        • -
        • New releases from thousands of artists in East Africa
        • -
        • Offline listening and custom playlists
        • -
        • Gospel, Hip Hop, Genge, Afropop, Dancehall, DJ Mixes, Rhumba, Zilizopendwa, and many more genres
        • -
        -

        Why download music at Mdundo?

        -

        The benefits of using Mdundo for music lovers

        -

        Mdundo is not just another music app or website. It is a platform that connects you with the best of African music and culture. By downloading music at Mdundo, you can enjoy some of the following benefits:

        -

        Access to a large library of African music

        -

        Mdundo gives you access to the largest library of music in Tanzania, Uganda, Kenya, Nigeria, Zambia, Ghana, and more. You can find songs from all your favourite artists, such as Diamond Platnumz, Harmonize, Sauti Sol, Davido, Wizkid, Burna Boy, Tiwa Savage, and many more. You can also discover new artists and genres that you might not have heard before. Whether you are looking for Bongo Flava, Naija Music, Gospel Music, or any other type of music, you can find it on Mdundo.

        -

        download free music from mdundo
        -mdundo music app download for android
        -how to download mdundo songs to phone
        -mdundo gospel music download mp3
        -download latest afrobeats songs from mdundo
        -mdundo free music downloads everyday
        -mdundo nigerian music download
        -download bongo flava music at mdundo
        -mdundo dj mixes download 2023
        -mdundo music download for pc
        -download zambian music at mdundo
        -mdundo ugandan music download mp3
        -download dancehall music from mdundo
        -mdundo hip hop music download
        -download taarab music at mdundo
        -mdundo kenyan music download
        -download zilizopendwa music from mdundo
        -mdundo rhumba music download
        -download new gospel mixes at mdundo
        -mdundo naija throwback mixes download
        -download tanzanian music at mdundo
        -mdundo ghanaian music download
        -download genge music from mdundo
        -mdundo afropop music download
        -download sauti sol songs at mdundo
        -mdundo octopizzo music download
        -download mercy masika songs from mdundo
        -mdundo nameless music download
        -download daddy owen songs at mdundo
        -mdundo the kansoul music download
        -download peter msechu songs from mdundo
        -mdundo ruby music download
        -download khadija kopa songs at mdundo
        -mdundo king saha music download
        -download ziza bafana songs from mdundo
        -mdundo radio and weasel music download
        -download lilian mbabazi songs at mdundo
        -mdundo navio music download
        -download belle 9 songs from mdundo
        -mdundo vanessa mdee music download
        -download ben pol songs at mudundo
        -mudundo nikki mbishi music download
        -download jux songs from mudundo
        -mudundo kagwe mungai music download
        -download atemi oyungu songs at mudundo
        -mudundo kelele takatifu music download
        -download h_art the band songs from mudundo
        -mudundo ken wa maria music download
        -download amani songs at mudundo

        -

        High quality MP3 downloads

        -

        Mdundo does not compromise on quality when it comes to downloading music. You can download high quality MP3 files that are compatible with any device. You can also choose the bitrate that suits your preference and data plan. You can download as many songs as you want without any limits or restrictions.
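        For context on what "downloading an MP3" amounts to technically, it is an ordinary streamed HTTP download, which the Mdundo app or website handles for you. A generic sketch of that mechanism in Python, using the requests package and a placeholder URL (not an actual Mdundo endpoint):

```python
import requests

def download_file(url: str, out_path: str) -> None:
    """Stream a download to disk so large MP3s never need to fit in memory."""
    with requests.get(url, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        with open(out_path, "wb") as f:
            for chunk in resp.iter_content(chunk_size=64 * 1024):
                f.write(chunk)

download_file("https://example.com/track.mp3", "track.mp3")  # placeholder URL
```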

        -

        Offline listening and custom playlists

        -

        Mdundo allows you to listen to your downloaded songs offline without any internet connection. You can also create your own custom playlists based on your mood, taste, or occasion. You can save your favourite songs and artists to listen to whenever you are offline. You can also share your playlists with your friends and family.

        -

        Trending charts and new releases

        -

        Mdundo keeps you updated with the latest trends and releases in the African music scene. You can check out the trending charts for Kenya, Uganda, and Tanzania to see what songs are hot right now. You can also find out about the new releases from thousands of artists across East Africa.
        Frequently Asked Questions

        Is Mdundo legal and safe to use?
        Yes, Mdundo is legal and safe to use. Mdundo works with the artists and labels to ensure that they get paid for their music. Mdundo also respects the intellectual property rights of the music owners.

        How can I support Mdundo?
        You can support Mdundo by sharing it with your friends and family, and by giving it a positive rating and review on the app store or the website. You can also follow Mdundo on social media and subscribe to their newsletter.

        What are the requirements to use Mdundo?
        You need a device that can access the internet, such as a smartphone, tablet, laptop, or desktop. You also need a data plan or a Wi-Fi connection to download or stream music on Mdundo. You do not need to register or sign up to use Mdundo.

        How can I contact Mdundo?
        You can contact Mdundo by sending an email to info@mdundo.com or by filling out the contact form on their website. You can also reach them on Facebook, Twitter, Instagram, or YouTube.

        -
        -
        \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Drive Rescue and Fight Fires with Real Fire Truck Driving Simulator Fire Fighting Mod APK.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Drive Rescue and Fight Fires with Real Fire Truck Driving Simulator Fire Fighting Mod APK.md deleted file mode 100644 index b1812d7cbed2244d5ef7f319fba80e9652de46ae..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Drive Rescue and Fight Fires with Real Fire Truck Driving Simulator Fire Fighting Mod APK.md +++ /dev/null @@ -1,84 +0,0 @@ - -

        Real Fire Truck Driving Simulator Fire Fighting Mod Apk: A Review

        -

        If you love fire truck games and want to become a real fireman, then you should try Real Fire Truck Driving Simulator Fire Fighting Mod Apk. This is the best fire fighting game simulator of 2021 that will give you an adrenaline rush and a realistic experience of saving the city from fires. In this article, we will review this game and tell you why you should play it, what features it has, and how to download and install it on your device.

        -

        What is Real Fire Truck Driving Simulator Fire Fighting Mod Apk?

        -

        Real Fire Truck Driving Simulator Fire Fighting Mod Apk is a modified version of the original game that gives you unlimited money and unlocked features. You can use this money to buy and upgrade your fire trucks and enjoy the game without any limitations. You can also access all the features that are otherwise locked in the original game, such as different camera angles, steering wheel, arrows, and more.

        -




        -

        Why should you play Real Fire Truck Driving Simulator Fire Fighting Mod Apk?

        -

        There are many reasons why you should play this game, but here are some of the main ones:

        -

        Amazing fire trucks

        -

        You can choose from 5 different fire trucks that have their own specifications and abilities. You can customize them with various colors, stickers, and accessories. You can also upgrade them with better engines, tires, brakes, and more.

        -

        Realistic 3D graphics

        -

        The game has stunning 3D graphics that will make you feel like you are in a real city. You can see the buildings, roads, trees, cars, people, and of course, the fires. The game also has realistic sound effects that will enhance your immersion.

        -

        Huge city

        -

        The game has a huge city map that you can explore and drive around. You can find different locations, such as residential areas, industrial zones, parks, bridges, highways, and more. You can also encounter different situations, such as traffic jams, accidents, roadblocks, and more.

        -

        Sirens, lights, horn, signals

        -

        The game has realistic sirens, lights, horn, and signals that you can use to alert other drivers and pedestrians. You can also use them to communicate with other fire trucks and emergency vehicles. You can switch between different modes of sirens and lights depending on the situation.

        -

        Exciting missions

        -

        The game has many exciting missions that will challenge your skills and reflexes. You have to respond to emergency calls and reach the fire scenes as fast as possible. You have to extinguish the fires using your water hose and rescue the people trapped inside the buildings. You have to follow the instructions and complete the missions within the time limit.

        -

        real fire truck driving simulator fire fighting game download
        -real fire truck driving simulator fire fighting apk mod unlimited money
        -real fire truck driving simulator fire fighting android gameplay
        -real fire truck driving simulator fire fighting free online
        -real fire truck driving simulator fire fighting hack apk
        -real fire truck driving simulator fire fighting latest version
        -real fire truck driving simulator fire fighting review
        -real fire truck driving simulator fire fighting cheats
        -real fire truck driving simulator fire fighting offline
        -real fire truck driving simulator fire fighting 3d
        -real fire truck driving simulator fire fighting tips and tricks
        -real fire truck driving simulator fire fighting best trucks
        -real fire truck driving simulator fire fighting update
        -real fire truck driving simulator fire fighting ios
        -real fire truck driving simulator fire fighting pc
        -real fire truck driving simulator fire fighting softonic
        -real fire truck driving simulator fire fighting youtube
        -real fire truck driving simulator fire fighting king games
        -real fire truck driving simulator fire fighting play store
        -real fire truck driving simulator fire fighting features
        -real fire truck driving simulator fire fighting missions
        -real fire truck driving simulator fire fighting graphics
        -real fire truck driving simulator fire fighting controls
        -real fire truck driving simulator fire fighting sound effects
        -real fire truck driving simulator fire fighting realistic physics
        -real fire truck driving simulator fire fighting city map
        -real fire truck driving simulator fire fighting rescue mode
        -real fire truck driving simulator fire fighting simulation games
        -real fire truck driving simulator fire fighting mod apk download link
        -real fire truck driving simulator fire fighting how to install

        -

        AI cars, traffic lights

        -

        The game has realistic AI cars and traffic lights that will make your driving more challenging and fun. You have to obey the traffic rules and avoid collisions with other vehicles. You have to watch out for red lights, stop signs, speed limits, and more.

        -

        Different camera angles

        -

        The game has different camera angles that you can switch between to get a better view of your surroundings. You can choose from first-person view, third-person view, top-down view, rear view, and more. You can also zoom in and out to see more details.

        -

        Steering wheel, arrows

        -

        The game has two options for controlling your fire truck. You can choose between steering wheel and arrows. You can also adjust the sensitivity and position of the controls according to your preference. The game has smooth and realistic physics that will make your driving more enjoyable.

        -

        How to download and install Real Fire Truck Driving Simulator Fire Fighting Mod Apk?

        -

        If you want to download and install Real Fire Truck Driving Simulator Fire Fighting Mod Apk, you can follow these simple steps:

        -
          -
        1. Click on the download button below to get the mod APK file.
        2. Allow unknown sources in your device settings.
        3. Locate the downloaded file and tap on it to install it.
        4. Launch the game and enjoy!
        -

        Download Real Fire Truck Driving Simulator Fire Fighting Mod Apk

        -

        Conclusion

        -

        Real Fire Truck Driving Simulator Fire Fighting Mod Apk is a great game for fire truck lovers and fireman wannabes. It has amazing features, realistic graphics, huge city, exciting missions, and more. You can download and install it for free and enjoy unlimited money and unlocked features. You can also play it offline and online with your friends. If you are looking for a fun and thrilling fire fighting game simulator, you should definitely try this one.

        -

        FAQs

        -

        Here are some of the frequently asked questions about Real Fire Truck Driving Simulator Fire Fighting Mod Apk:

        -

        Q: Is Real Fire Truck Driving Simulator Fire Fighting Mod Apk safe to download and install?

        -

        A: Yes, it is safe to download and install. It does not contain any viruses or malware. However, you should always download it from a trusted source and scan it before installing it.

        -

        Q: Do I need to root my device to use Real Fire Truck Driving Simulator Fire Fighting Mod Apk?

        -

        A: No, you do not need to root your device to use this mod apk. It works on both rooted and non-rooted devices.

        -

        Q: What are the minimum requirements to play Real Fire Truck Driving Simulator Fire Fighting Mod Apk?

        -

        A: The minimum requirements to play this game are:

        -
          -
        • Android 4.4 or higher
        • 2 GB of RAM
        • 300 MB of free storage space
        -

        Q: How can I play Real Fire Truck Driving Simulator Fire Fighting Mod Apk with my friends?

        -

        A: You can play this game with your friends online by connecting to the same Wi-Fi network or using a hotspot. You can also chat with them using the in-game voice chat feature.

        -

        Q: How can I contact the developers of Real Fire Truck Driving Simulator Fire Fighting Mod Apk?

        -

        A: You can contact the developers of this game by sending them an email at support@realfiretruck.com or by visiting their Facebook page.

        -
        -
        \ No newline at end of file diff --git a/spaces/skf15963/summary/fengshen/examples/clue1.1/predict2submit/cmrc2018_submit.py b/spaces/skf15963/summary/fengshen/examples/clue1.1/predict2submit/cmrc2018_submit.py deleted file mode 100644 index 95a89f302d0d77e540ff759cb268be6067483a43..0000000000000000000000000000000000000000 --- a/spaces/skf15963/summary/fengshen/examples/clue1.1/predict2submit/cmrc2018_submit.py +++ /dev/null @@ -1,41 +0,0 @@ -import json -from tqdm import tqdm -import argparse - - -def save_data(data,file_path): - with open(file_path, 'w', encoding='utf8') as f: - json_data=json.dumps(data,ensure_ascii=False) - f.write(json_data+'\n') - - -def submit(file_path): - id2score={} - with open(file_path, 'r', encoding='utf8') as f: - lines = f.readlines() - for line in lines: - line = json.loads(line) - for choice in line['choices']: - if choice['id'] not in id2score.keys(): - id2score[choice['id']]=[] - id2score[choice['id']].extend(choice['entity_list']) - - result={} - for k,v in id2score.items(): - if v==[]: - result[k]='' - else: - result[k] = sorted(v, key=lambda k: k['score'],reverse=True)[0]['entity_name'] - return result - - -if __name__=="__main__": - parser = argparse.ArgumentParser(description="train") - parser.add_argument("--data_path", type=str,default="") - parser.add_argument("--save_path", type=str,default="") - - args = parser.parse_args() - save_data(submit(args.data_path), args.save_path) - - - \ No newline at end of file diff --git a/spaces/skimai/DragGAN_Streamlit/draggan.py b/spaces/skimai/DragGAN_Streamlit/draggan.py deleted file mode 100644 index 30bb41286e1ab079c15b5ab5d063f5f1ac9e483f..0000000000000000000000000000000000000000 --- a/spaces/skimai/DragGAN_Streamlit/draggan.py +++ /dev/null @@ -1,463 +0,0 @@ -import os -import sys -import time -from typing import List, Optional, Tuple -import math - -import numpy as np -import PIL -import torch -import streamlit as st - -stylegan2_dir = os.path.abspath("stylegan2") -sys.path.insert(0, stylegan2_dir) -import dnnlib -import legacy - -import utils - - -@st.cache_resource -def load_model( - network_pkl: str = "https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/afhqdog.pkl", - device: torch.device = torch.device("cuda"), - fp16: bool = True, -) -> torch.nn.Module: - """ - Loads a pretrained StyleGAN2-ADA generator network from a pickle file. - - Args: - network_pkl (str): The URL or local path to the network pickle file. - device (torch.device): The device to use for the computation. - fp16 (bool): Whether to use half-precision floating point format for the network weights. - - Returns: - The pretrained generator network. - """ - print('Loading networks from "%s"...' 
% network_pkl) - with dnnlib.util.open_url(network_pkl) as f: - chkpt = legacy.load_network_pkl(f, force_fp16=fp16) - G = chkpt["G_ema"].to(device).eval() - for param in G.parameters(): - param.requires_grad_(False) - - # Create a new attribute called "activations" for the Generator class - # This will be a list of activations from each layer - G.__setattr__("activations", None) - - # Forward hook to collect features - def hook(module, input, output): - G.activations = output - - # Apply the hook to the 7th layer (256x256) - for i, (name, module) in enumerate(G.synthesis.named_children()): - if i == 6: - print("Registering hook for:", name) - module.register_forward_hook(hook) - - return G - - -@st.cache_data() -def generate_W( - _G: torch.nn.Module, - seed: int = 0, - network_pkl: Optional[str] = None, - truncation_psi: float = 1.0, - truncation_cutoff: Optional[int] = None, - device: torch.device = torch.device("cuda"), -) -> np.ndarray: - """ - Generates a latent code tensor in W+ space from a pretrained StyleGAN2-ADA generator network. - - Args: - _G (torch.nn.Module): The generator network, with underscore to avoid streamlit cache error - seed (int): The random seed to use for generating the latent code. - network_pkl (Optional[str]): The path to the network pickle file. If None, the default network will be used. - truncation_psi (float): The truncation psi value to use for the mapping network. - truncation_cutoff (Optional[int]): The number of layers to use for the truncation trick. If None, all layers will be used. - device (torch.device): The device to use for the computation. - - Returns: - The W+ latent as a numpy array of shape [1, num_layers, 512]. - """ - G = _G - torch.manual_seed(seed) - z = torch.randn(1, G.z_dim).to(device) - num_layers = G.synthesis.num_ws - if truncation_cutoff == -1: - truncation_cutoff = None - elif truncation_cutoff is not None: - truncation_cutoff = min(num_layers, truncation_cutoff) - W = G.mapping( - z, - None, - truncation_psi=truncation_psi, - truncation_cutoff=truncation_cutoff, - ) - return W.cpu().numpy() - - -def forward_G( - G: torch.nn.Module, - W: torch.Tensor, - device: torch.device, -) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Forward pass through the generator network. - - Args: - G (torch.nn.Module): The generator network. - W (torch.Tensor): The latent code tensor of shape [batch_size, latent_dim, 512]. - device (torch.device): The device to use for the computation. - - Returns: - A tuple containing the generated image tensor of shape [batch_size, 3, height, width] - and the feature maps tensor of shape [batch_size, num_channels, height, width]. - """ - if not isinstance(W, torch.Tensor): - W = torch.from_numpy(W).to(device) - - img = G.synthesis(W, noise_mode="const", force_fp32=True) - - return img, G.activations[0] - - -@st.cache_data() -def generate_image( - W, - _G: Optional[torch.nn.Module] = None, - network_pkl: Optional[str] = None, - class_idx=None, - device=torch.device("cuda"), -) -> Tuple[PIL.Image.Image, torch.Tensor]: - """ - Generates an image using a pretrained generator network. - - Args: - W (torch.Tensor): A tensor of latent codes of shape [batch_size, latent_dim, 512]. - _G (Optional[torch.nn.Module]): The generator network. If None, the network will be loaded from `network_pkl`. - network_pkl (Optional[str]): The path to the network pickle file. If None, the default network will be used. - class_idx (Optional[int]): The class index to use for conditional generation. 
If None, unconditional generation will be used. - device (str): The device to use for the computation. - - Returns: - A tuple containing the generated image as a PIL Image object and the feature maps tensor of shape [batch_size, num_channels, height, width]. - """ - if _G is None: - assert network_pkl is not None - _G = load_model(network_pkl, device) - G = _G - - # Labels. - label = torch.zeros([1, G.c_dim], device=device) - if G.c_dim != 0: - if class_idx is None: - raise Exception( - "Must specify class label with --class when using a conditional network" - ) - label[:, class_idx] = 1 - else: - if class_idx is not None: - print("warn: --class=lbl ignored when running on an unconditional network") - - ## Generate image - img, features = forward_G(G, W, device) - - img = utils.tensor_to_PIL(img) - - return img, features - - -def optimize( - W: np.ndarray, - G: torch.nn.Module, - handle_points: List[Tuple[int, int]], - target_points: List[Tuple[int, int]], - r1: int = 3, - r2: int = 12, - tolerance: int = 2, - max_iter: int = 200, - lr: float = 0.1, - multiplier: float = 1.0, - lambda_: float = 0.1, - device: torch.device = torch.device("cuda"), - empty=None, - display_every: int = 10, - target_resolution: int = 512, -) -> np.ndarray: - """ - Optimizes the latent code tensor W to generate an image that matches the target points. - - Args: - W (np.ndarray): The initial latent code tensor of shape [1, num_layers, 512]. - G (torch.nn.Module): The generator network. - handle_points (List[Tuple[int, int]]): The initial handle points as a list of (x, y) tuples. - target_points (List[Tuple[int, int]]): The target points as a list of (x, y) tuples. - r1 (int): The radius of the motion supervision loss. - r2 (int): The radius of the point tracking. - d (int): The tolerance for the handle points to reach the target points. - max_iter (int): The maximum number of optimization iterations. - lr (float): The learning rate for the optimizer. - multiplier (float): The speed multiplier for the motion supervision loss. - lambda_ (float): The weight of the motion supervision loss. - device (torch.device): The device to use for the computation. - empty: The st.empty object to display the intermediate images. - display_every (int): The number of iterations between displaying intermediate images. - target_resolution (int): The target resolution for the generated image. - - Returns: - The optimized latent code tensor W as a numpy array of shape [1, num_layers, 512]. 
- """ - img, F0 = forward_G(G, W, device) - empty.image( - utils.tensor_to_PIL(img), caption="Initial image", width=target_resolution - ) - - F0_resized = torch.nn.functional.interpolate( - F0, - size=(target_resolution, target_resolution), - mode="bilinear", - align_corners=True, - ).detach() - - # Convert handle/target points to tensors and reorder to [y, x] - handle_points: torch.tensor = ( - torch.tensor(handle_points, device=device).flip(-1).float() - ) - handle_points_0 = handle_points.clone() - target_points: torch.tensor = ( - torch.tensor(target_points, device=device).flip(-1).float() - ) - - W = torch.from_numpy(W).to(device).float() - W.requires_grad_(False) - - # Only optimize the first 6 layers of W - W_layers_to_optimize = W[:, :6].clone() - W_layers_to_optimize.requires_grad_(True) - - optimizer = torch.optim.Adam([W_layers_to_optimize], lr=lr) - - for i in range(max_iter): - start = time.perf_counter() - - # # Check if the handle points have reached the target points - if torch.allclose(handle_points, target_points, atol=tolerance): - break - - optimizer.zero_grad() - - # Detach only the unoptimized layers - W_combined = torch.cat([W_layers_to_optimize, W[:, 6:].detach()], dim=1) - - # Run the generator to get the image and feature maps - img, F = forward_G(G, W_combined, device) - - ## Bilinear interpolate F to be same size as img - F_resized = torch.nn.functional.interpolate( - F, - size=(target_resolution, target_resolution), - mode="bilinear", - align_corners=True, - ) - - # Compute the motion supervision loss - loss, all_shifted_coordinates = motion_supervision( - F_resized, - F0_resized, - handle_points, - target_points, - r1, - lambda_, - device, - multiplier=multiplier, - ) - - # Backpropagate the loss and update the latent code - loss.backward() - - # # Clip gradients if their norm exceeds max_grad_norm - # torch.nn.utils.clip_grad_norm_(W_layers_to_optimize, 1.0) - - # # Compute the L2 regularization term - # l2_regularization = 100 * torch.norm(W_layers_to_optimize - W[:, :6]) ** 2 - # print(l2_regularization.item()) - # # Add the regularization term to the loss - # loss += l2_regularization - - optimizer.step() - - print( - f"{i}\tLoss: {loss.item():0.2f}\tTime: {(time.perf_counter() - start) * 1000:.0f}ms" - ) - - if i % display_every == 0 or i == max_iter - 1: - # Draw d_i intermediate target as orange ellipse - img = utils.tensor_to_PIL(img) - if img.size[0] != target_resolution: - img = img.resize((target_resolution, target_resolution)) - - utils.draw_handle_target_points(img, handle_points.flip(-1).cpu().long().numpy().tolist(), target_points.flip(-1).cpu().long().numpy().tolist()) - # draw = PIL.ImageDraw.Draw(img) - - - empty.image( - img, caption=f"iter: {i}, loss: {loss:.2f}", width=target_resolution - ) - - # Update the handle points with point tracking - handle_points = point_tracking( - F_resized, - F0_resized, - handle_points, - handle_points_0, - r2, - device, - ) - - return torch.cat([W_layers_to_optimize, W[:, 6:]], dim=1).detach().cpu().numpy() - - -def motion_supervision( - F: torch.Tensor, - F0: torch.Tensor, - handle_points: torch.Tensor, - target_points: torch.Tensor, - r1: int = 3, - lambda_: float = 20.0, - device: torch.device = torch.device("cuda"), - multiplier: float = 1.0, -) -> Tuple[torch.Tensor, List[torch.Tensor]]: - """ - Computes the motion supervision loss and the shifted coordinates for each handle point. - - Args: - F (torch.Tensor): The feature map tensor of shape [batch_size, num_channels, height, width]. 
- F0 (torch.Tensor): The original feature map tensor of shape [batch_size, num_channels, height, width]. - handle_points (torch.Tensor): The handle points tensor of shape [num_handle_points, 2]. - target_points (torch.Tensor): The target points tensor of shape [num_handle_points, 2]. - r1 (int): The radius of the circular mask around each handle point. - lambda_ (float): The weight of the reconstruction loss for the unmasked region. - device (torch.device): The device to use for the computation. - multiplier (float): The multiplier to use for the direction vector. - - Returns: - A tuple containing the motion supervision loss tensor and a list of shifted coordinates - for each handle point, where each element in the list is a tensor of shape [num_points, 2]. - """ - n = handle_points.shape[0] # Number of handle points - loss = 0.0 - all_shifted_coordinates = [] # List of shifted patches - - for i in range(n): - # Compute direction vector - target2handle = target_points[i] - handle_points[i] - d_i = target2handle / (torch.norm(target2handle) + 1e-7) * multiplier - if torch.norm(d_i) > torch.norm(target2handle): - d_i = target2handle - - # Compute the mask for the pixels within radius r1 of the handle point - mask = utils.create_circular_mask( - F.shape[2], F.shape[3], center=handle_points[i].tolist(), radius=r1 - ).to(device) - # mask = utils.create_square_mask(F.shape[2], F.shape[3], center=handle_points[i].tolist(), radius=r1).to(device) - - # Find indices where mask is True - coordinates = torch.nonzero(mask).float() # shape [num_points, 2] - - # Shift the coordinates in the direction d_i - shifted_coordinates = coordinates + d_i[None] - all_shifted_coordinates.append(shifted_coordinates) - - h, w = F.shape[2], F.shape[3] - - # Extract features in the mask region and compute the loss - F_qi = F[:, :, mask] # shape: [C, H*W] - - # Sample shifted patch from F - normalized_shifted_coordinates = shifted_coordinates.clone() - normalized_shifted_coordinates[:, 0] = ( - 2.0 * shifted_coordinates[:, 0] / (h - 1) - ) - 1 # for height - normalized_shifted_coordinates[:, 1] = ( - 2.0 * shifted_coordinates[:, 1] / (w - 1) - ) - 1 # for width - # Add extra dimensions for batch and channels (required by grid_sample) - normalized_shifted_coordinates = normalized_shifted_coordinates.unsqueeze( - 0 - ).unsqueeze( - 0 - ) # shape [1, 1, num_points, 2] - normalized_shifted_coordinates = normalized_shifted_coordinates.flip( - -1 - ) # grid_sample expects [x, y] instead of [y, x] - normalized_shifted_coordinates = normalized_shifted_coordinates.clamp(-1, 1) - - # Use grid_sample to interpolate the feature map F at the shifted patch coordinates - F_qi_plus_di = torch.nn.functional.grid_sample( - F, normalized_shifted_coordinates, mode="bilinear", align_corners=True - ) - # Output has shape [1, C, 1, num_points] so squeeze it - F_qi_plus_di = F_qi_plus_di.squeeze(2) # shape [1, C, num_points] - - loss += torch.nn.functional.l1_loss(F_qi.detach(), F_qi_plus_di) - - # TODO: add reconstruction loss for the unmasked region - # # Add reconstruction loss for the unmasked region - # loss += lambda_ * torch.norm((F - F0) * (1 - mask_total), p=1) - - return loss, all_shifted_coordinates - - -def point_tracking( - F: torch.Tensor, - F0: torch.Tensor, - handle_points: torch.Tensor, # [N, y, x] - handle_points_0: torch.Tensor, # [N, y, x] - r2: int = 3, - device: torch.device = torch.device("cuda"), -) -> torch.Tensor: - """ - Tracks the movement of handle points in an image using feature matching. 
- - Args: - F (torch.Tensor): The feature maps tensor of shape [batch_size, num_channels, height, width]. - F0 (torch.Tensor): The feature maps tensor of shape [batch_size, num_channels, height, width] for the initial image. - handle_points (torch.Tensor): The handle points tensor of shape [N, y, x]. - handle_points_0 (torch.Tensor): The handle points tensor of shape [N, y, x] for the initial image. - r2 (int): The radius of the patch around each handle point to use for feature matching. - device (torch.device): The device to use for the computation. - - Returns: - The new handle points tensor of shape [N, y, x]. - """ - n = handle_points.shape[0] # Number of handle points - new_handle_points = torch.zeros_like(handle_points) - - for i in range(n): - # Compute the patch around the handle point - patch = utils.create_square_mask( - F.shape[2], F.shape[3], center=handle_points[i].tolist(), radius=r2 - ).to(device) - - # Find indices where the patch is True - patch_coordinates = torch.nonzero(patch) # shape [num_points, 2] - - # Extract features in the patch - F_qi = F[ - :, :, patch_coordinates[:, 0], patch_coordinates[:, 1] - ] - # Extract feature of the initial handle point - f_i = F0[ - :, :, handle_points_0[i][0].long(), handle_points_0[i][1].long() - ] - - # Compute the L1 distance between the patch features and the initial handle point feature - distances = torch.norm(F_qi - f_i[:, :, None], p=1, dim=1) - - # Find the new handle point as the one with minimum distance - min_index = torch.argmin(distances) - new_handle_points[i] = patch_coordinates[min_index] - - return new_handle_points diff --git a/spaces/smallyu/img-to-music/style.css b/spaces/smallyu/img-to-music/style.css deleted file mode 100644 index 8f7397fe7f0971636015170df075cd2d070344ec..0000000000000000000000000000000000000000 --- a/spaces/smallyu/img-to-music/style.css +++ /dev/null @@ -1,51 +0,0 @@ -#col-container {max-width: 510px; margin-left: auto; margin-right: auto;} -a {text-decoration-line: underline; font-weight: 600;} -div#music-output .h-full { - min-height: 5rem; -} -.footer { - margin-bottom: 45px; - margin-top: 10px; - text-align: center; - border-bottom: 1px solid #e5e5e5; - } - .footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(10px); - background: white; - } - .dark .footer { - border-color: #303030; - } - .dark .footer>p { - background: #0b0f19; - } -.animate-spin { - animation: spin 1s linear infinite; -} -@keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } -} -#share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; -} -#share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0; -} -#share-btn * { - all: unset; -} -#share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; -} -#share-btn-container .wrap { - display: none !important; -} \ No newline at end of file diff --git a/spaces/srikotha/runwayml-stable-diffusion-v1-5/app.py b/spaces/srikotha/runwayml-stable-diffusion-v1-5/app.py deleted file mode 100644 index a82df332731f067826d3e1ef79fabceffb74d07e..0000000000000000000000000000000000000000 --- 
a/spaces/srikotha/runwayml-stable-diffusion-v1-5/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/runwayml/stable-diffusion-v1-5").launch() \ No newline at end of file diff --git a/spaces/stamps-labs/stamp2vec/detection_models/yolo_stamp/utils.py b/spaces/stamps-labs/stamp2vec/detection_models/yolo_stamp/utils.py deleted file mode 100644 index b1831853155f22b2e39dfe4cc531359a98e0c75a..0000000000000000000000000000000000000000 --- a/spaces/stamps-labs/stamp2vec/detection_models/yolo_stamp/utils.py +++ /dev/null @@ -1,247 +0,0 @@ -import torch -import pandas as pd -import numpy as np -from pathlib import Path -import matplotlib.pyplot as plt -from .constants import * - - -def output_tensor_to_boxes(boxes_tensor): - """ - Converts the YOLO output tensor to list of boxes with probabilites. - - Arguments: - boxes_tensor -- tensor of shape (S, S, BOX, 5) - - Returns: - boxes -- list of shape (None, 5) - - Note: "None" is here because you don't know the exact number of selected boxes, as it depends on the threshold. - For example, the actual output size of scores would be (10, 5) if there are 10 boxes - """ - cell_w, cell_h = W/S, H/S - boxes = [] - - for i in range(S): - for j in range(S): - for b in range(BOX): - anchor_wh = torch.tensor(ANCHORS[b]) - data = boxes_tensor[i,j,b] - xy = torch.sigmoid(data[:2]) - wh = torch.exp(data[2:4])*anchor_wh - obj_prob = torch.sigmoid(data[4]) - - if obj_prob > OUTPUT_THRESH: - x_center, y_center, w, h = xy[0], xy[1], wh[0], wh[1] - x, y = x_center+j-w/2, y_center+i-h/2 - x,y,w,h = x*cell_w, y*cell_h, w*cell_w, h*cell_h - box = [x,y,w,h, obj_prob] - boxes.append(box) - return boxes - - -def plot_img(img, size=(7,7)): - plt.figure(figsize=size) - plt.imshow(img) - plt.show() - - -def plot_normalized_img(img, std=STD, mean=MEAN, size=(7,7)): - mean = mean if isinstance(mean, np.ndarray) else np.array(mean) - std = std if isinstance(std, np.ndarray) else np.array(std) - plt.figure(figsize=size) - plt.imshow((255. * (img * std + mean)).astype(np.uint)) - plt.show() - - -def read_data(annotations=Path(ANNOTATIONS_PATH)): - """ - Reads annotations data from .csv file. Must contain columns: image_name, bbox_x, bbox_y, bbox_width, bbox_height. 
- - Arguments: - annotations_path -- string or Path specifying path of annotations file - - Returns: - data -- list of dictionaries containing path, number of boxes and boxes itself - """ - data = [] - - boxes = pd.read_csv(annotations) - image_names = boxes['image_name'].unique() - - for image_name in image_names: - cur_boxes = boxes[boxes['image_name'] == image_name] - img_data = { - 'file_path': image_name, - 'box_nb': len(cur_boxes), - 'boxes': []} - stamp_nb = img_data['box_nb'] - if stamp_nb <= STAMP_NB_MAX: - img_data['boxes'] = cur_boxes[['bbox_x', 'bbox_y','bbox_width','bbox_height']].values - data.append(img_data) - return data - -def xywh2xyxy(x): - """ - Converts xywh format to xyxy - - Arguments: - x -- torch.Tensor or np.array (xywh format) - - Returns: - y -- torch.Tensor or np.array (xyxy) - """ - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[..., 0] = x[..., 0] - y[..., 1] = x[..., 1] - y[..., 2] = x[..., 0] + x[..., 2] - y[..., 3] = x[..., 1] + x[..., 3] - return y - -def boxes_to_tensor(boxes): - """ - Convert list of boxes (and labels) to tensor format - - Arguments: - boxes -- list of boxes - - Returns: - boxes_tensor -- tensor of shape (S, S, BOX, 5) - """ - boxes_tensor = torch.zeros((S, S, BOX, 5)) - cell_w, cell_h = W/S, H/S - for i, box in enumerate(boxes): - x, y, w, h = box - # normalize xywh with cell_size - x, y, w, h = x / cell_w, y / cell_h, w / cell_w, h / cell_h - center_x, center_y = x + w / 2, y + h / 2 - grid_x = int(np.floor(center_x)) - grid_y = int(np.floor(center_y)) - - if grid_x < S and grid_y < S: - boxes_tensor[grid_y, grid_x, :, 0:4] = torch.tensor(BOX * [[center_x - grid_x, center_y - grid_y, w, h]]) - boxes_tensor[grid_y, grid_x, :, 4] = torch.tensor(BOX * [1.]) - return boxes_tensor - - -def target_tensor_to_boxes(boxes_tensor, output_threshold=OUTPUT_THRESH): - """ - Recover target tensor (tensor output of dataset) to bboxes. - Arguments: - boxes_tensor -- tensor of shape (S, S, BOX, 5) - Returns: - boxes -- list of boxes, each box is [x, y, w, h] - """ - cell_w, cell_h = W/S, H/S - boxes = [] - for i in range(S): - for j in range(S): - for b in range(BOX): - data = boxes_tensor[i,j,b] - x_center,y_center, w, h, obj_prob = data[0], data[1], data[2], data[3], data[4] - if obj_prob > output_threshold: - x, y = x_center+j-w/2, y_center+i-h/2 - x,y,w,h = x*cell_w, y*cell_h, w*cell_w, h*cell_h - box = [x,y,w,h] - boxes.append(box) - return boxes - - -def overlap(interval_1, interval_2): - """ - Calculates length of overlap between two intervals. - - Arguments: - interval_1 -- list or tuple of shape (2,) containing endpoints of the first interval - interval_2 -- list or tuple of shape (2, 2) containing endpoints of the second interval - - Returns: - overlap -- length of overlap - """ - x1, x2 = interval_1 - x3, x4 = interval_2 - if x3 < x1: - if x4 < x1: - return 0 - else: - return min(x2,x4) - x1 - else: - if x2 < x3: - return 0 - else: - return min(x2,x4) - x3 - - -def compute_iou(box1, box2): - """ - Compute IOU between box1 and box2. - - Argmunets: - box1 -- list of shape (5, ). Represents the first box - box2 -- list of shape (5, ). 
Represents the second box - Each box is [x, y, w, h, prob] - - Returns: - iou -- intersection over union score between two boxes - """ - x1,y1,w1,h1 = box1[0], box1[1], box1[2], box1[3] - x2,y2,w2,h2 = box2[0], box2[1], box2[2], box2[3] - - area1, area2 = w1*h1, w2*h2 - intersect_w = overlap((x1,x1+w1), (x2,x2+w2)) - intersect_h = overlap((y1,y1+h1), (y2,y2+w2)) - if intersect_w == w1 and intersect_h == h1 or intersect_w == w2 and intersect_h == h2: - return 1. - intersect_area = intersect_w*intersect_h - iou = intersect_area/(area1 + area2 - intersect_area) - return iou - - -def nonmax_suppression(boxes, iou_thresh = IOU_THRESH): - """ - Removes ovelap bboxes - - Arguments: - boxes -- list of shape (None, 5) - iou_thresh -- maximal value of iou when boxes are considered different - Each box is [x, y, w, h, prob] - - Returns: - boxes -- list of shape (None, 5) with removed overlapping boxes - """ - boxes = sorted(boxes, key=lambda x: x[4], reverse=True) - for i, current_box in enumerate(boxes): - if current_box[4] <= 0: - continue - for j in range(i+1, len(boxes)): - iou = compute_iou(current_box, boxes[j]) - if iou > iou_thresh: - boxes[j][4] = 0 - boxes = [box for box in boxes if box[4] > 0] - return boxes - - - -def yolo_head(yolo_output): - """ - Converts a yolo output tensor to separate tensors of coordinates, shapes and probabilities. - - Arguments: - yolo_output -- tensor of shape (batch_size, S, S, BOX, 5) - - Returns: - xy -- tensor of shape (batch_size, S, S, BOX, 2) containing coordinates of centers of found boxes for each anchor in each grid cell - wh -- tensor of shape (batch_size, S, S, BOX, 2) containing width and height of found boxes for each anchor in each grid cell - prob -- tensor of shape (batch_size, S, S, BOX, 1) containing the probability of presence of boxes for each anchor in each grid cell - """ - xy = torch.sigmoid(yolo_output[..., 0:2]) - anchors_wh = torch.tensor(ANCHORS, device=yolo_output.device).view(1, 1, 1, len(ANCHORS), 2) - wh = torch.exp(yolo_output[..., 2:4]) * anchors_wh - prob = torch.sigmoid(yolo_output[..., 4:5]) - return xy, wh, prob - -def process_target(target): - xy = target[..., 0:2] - wh = target[..., 2:4] - prob = target[..., 4:5] - return xy, wh, prob \ No newline at end of file diff --git a/spaces/stevez/b_demo_hf/test_extract.py b/spaces/stevez/b_demo_hf/test_extract.py deleted file mode 100644 index b5a5fc3f73e29ee211c30b8bab665a771fd62673..0000000000000000000000000000000000000000 --- a/spaces/stevez/b_demo_hf/test_extract.py +++ /dev/null @@ -1,57 +0,0 @@ -import pytest -import json - -from extract import extract - -def test_pure_string(): - inp = 'abc' - token, user, redirect, info = extract(inp) - assert token == inp - assert user is None - assert redirect is None - assert info is None - -def test_json_format_01(): - inp_s = '{"a":"b"}' - token, user, redirect, info = extract(inp_s) - inp = json.loads(inp_s) - assert token == None - assert user == None - assert redirect is None - assert info == inp - -def test_json_format_02(): - inp_s = '{"token":"b"}' - token, user, redirect, info = extract(inp_s) - inp = json.loads(inp_s) - assert token == inp['token'] - assert user == None - assert redirect is None - assert info == inp - -def test_json_format_03(): - inp_s = '{"user":"xb"}' - token, user, redirect, info = extract(inp_s) - inp = json.loads(inp_s) - assert token == None - assert user == inp['user'] - assert redirect is None - assert info == inp - -def test_json_format_04(): - inp_s = '{"user":"t1", "token":"t2"}' - token, user, 
redirect, info = extract(inp_s) - inp = json.loads(inp_s) - assert token == inp['token'] - assert user == inp['user'] - assert redirect is None - assert info == inp - -def test_json_format_05(): - inp_s = '{"user":"t1", "token":"t2", "redirect":"xx"}' - token, user, redirect, info = extract(inp_s) - inp = json.loads(inp_s) - assert token == inp['token'] - assert user == inp['user'] - assert redirect == inp['redirect'] - assert info == inp diff --git a/spaces/stomexserde/gpt4-ui/Autodesk-Autocad-Map-3d-2014-X86-X64-Torrent.md b/spaces/stomexserde/gpt4-ui/Autodesk-Autocad-Map-3d-2014-X86-X64-Torrent.md deleted file mode 100644 index 5fe4f2d94703e6fafa713b3cf771a4d2c44708a1..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Autodesk-Autocad-Map-3d-2014-X86-X64-Torrent.md +++ /dev/null @@ -1,67 +0,0 @@ -## autodesk autocad map 3d 2014 x86 x64 torrent - - - -**Autodesk Autocad Map 3d 2014 X86 X64 Torrent ::: [https://urlcod.com/2tw0yu](https://urlcod.com/2tw0yu)** - - - - Here is a possible title and article for your keyword: - -# How to Download and Install Autodesk AutoCAD Map 3D 2014 x86 x64 Torrent - - - -If you are looking for a powerful and complex mapping solution that can handle both CAD and GIS data, you might want to try Autodesk AutoCAD Map 3D 2014. This software is designed to help you plan and manage your infrastructure projects with intelligent industry data models and tools. You can also access and edit a wide range of data sources, such as web services, databases, and files. - - - -However, if you don't have a license or subscription for this software, you might be tempted to download it from a torrent site. This is not recommended, as it can expose you to legal risks, malware, viruses, and performance issues. Moreover, you might not get the full functionality and support that you need for your projects. - - - -Therefore, the best way to get Autodesk AutoCAD Map 3D 2014 is to download it from the official website of Autodesk. You can either buy a license or sign up for a free trial that lasts for 30 days. This way, you can enjoy the benefits of a genuine and updated software that meets your needs. - - - -To download and install Autodesk AutoCAD Map 3D 2014 from the official website, follow these steps: - - - -1. Go to [https://www.autodesk.com/products/autocad-map-3d/overview](https://www.autodesk.com/products/autocad-map-3d/overview) and click on the "Download Free Trial" button. - -2. Fill in the required information and click on "Next". You will receive an email with a download link and instructions. - -3. Click on the download link in the email and save the file to your computer. - -4. Run the file and follow the installation wizard. You will need to enter your serial number and product key that you received in the email. - -5. Once the installation is complete, launch the software and activate it with your Autodesk account. - - - -Congratulations! You have successfully downloaded and installed Autodesk AutoCAD Map 3D 2014 on your computer. You can now start creating and managing your maps with ease and accuracy. - -Here is a possible continuation of the article: - -Now that you have Autodesk AutoCAD Map 3D 2014 on your computer, you might be wondering what are some of the features and benefits of this software. Here are some of the highlights: - - - -- You can directly access and edit spatial data from various sources, such as web services, databases, and files, using Feature Data Objects (FDO) technology[^1^] [^2^]. 
This allows you to work with data from different formats and coordinate systems without conversion or translation. - -- You can connect to ArcGIS and streamline the flow of data between the two platforms. You can also keep your features information up to date and synchronized with ArcGIS[^1^] [^2^]. This enables you to collaborate with other GIS professionals and share your maps easily. - -- You can convert data between DWG and GIS formats with high fidelity using MapImport and MapExport. You can also import and export data to other common formats, such as SHP, SDF, and DGN[^1^] [^2^]. This gives you more flexibility and compatibility with your data sources and outputs. - -- You can create and edit precise maps with advanced drafting and editing tools. You can also use industry-specific symbols, styles, and standards to enhance your maps[^1^] [^2^]. This helps you to produce accurate and professional-looking maps that meet your requirements. - -- You can manage your infrastructure systems with Enterprise Industry Models. You can create, maintain, and analyze data models for various types of infrastructure, such as water, gas, electric, and wastewater[^1^] [^2^]. This allows you to improve your planning and decision-making processes for your infrastructure projects. - -- You can create powerful maps with analysis and visualization tools. You can perform spatial analysis, such as buffering, overlaying, and querying. You can also create thematic maps, 3D models, and animations[^1^] [^2^]. This helps you to communicate your insights and findings effectively. - - - -As you can see, Autodesk AutoCAD Map 3D 2014 is a comprehensive mapping solution that can handle both CAD and GIS data. It can help you to plan, design, and manage your infrastructure projects with ease and efficiency. If you want to learn more about this software, you can watch this video[^3^] that shows some of the new features in action. - - dfd1c89656 \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Criminal Justice Today 13th Edition Pdf Download Free.md b/spaces/stomexserde/gpt4-ui/Examples/Criminal Justice Today 13th Edition Pdf Download Free.md deleted file mode 100644 index 26ff1ac7f891662b69ce6e6fdd4ee90caf95f478..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Criminal Justice Today 13th Edition Pdf Download Free.md +++ /dev/null @@ -1,24 +0,0 @@ -
        -```markdown -

        How to Download Criminal Justice Today 13th Edition PDF for Free

        -

        If you are looking for a comprehensive and up-to-date introduction to the field of criminal justice, you might be interested in Criminal Justice Today: An Introductory Text for the 21st Century, written by Frank Schmalleger. This textbook covers the history, structure, functions, and challenges of the criminal justice system in the United States, as well as the latest trends and developments in crime and justice. It also features real-world examples, critical thinking questions, and interactive learning tools to help you master the concepts and skills you need for your career.

        -

        criminal justice today 13th edition pdf download free


Download Zip: https://urlgoal.com/2uI8HB



        -

        However, buying a new copy of this textbook can be quite expensive, especially if you are on a tight budget. That's why many students are looking for ways to download Criminal Justice Today 13th Edition PDF for free online. But is it legal and safe to do so? And where can you find a reliable source for the PDF file?

        -

        Is it Legal to Download Criminal Justice Today 13th Edition PDF for Free?

        -

The short answer is no. Under U.S. copyright law, it is illegal to reproduce, distribute, or publicly display a copyrighted work without the permission of the copyright owner or a legal exception that allows it. This means that downloading or sharing a PDF file of Criminal Justice Today 13th Edition without paying for it is a violation of the law and can result in serious consequences.

        -

        Some websites may claim to offer free PDF downloads of Criminal Justice Today 13th Edition, but they are likely to be either scams or pirated copies. Scam websites may try to trick you into providing your personal or financial information, downloading malware or viruses, or signing up for unwanted subscriptions or services. Pirated copies may be incomplete, inaccurate, outdated, or corrupted, and may also contain harmful software that can damage your device or compromise your security.

        -

        Therefore, it is not worth risking your legal rights, academic integrity, or computer safety by downloading Criminal Justice Today 13th Edition PDF for free from unauthorized sources. The only legal and safe way to access this textbook online is to purchase it from a reputable publisher or retailer.

        -

        Where to Buy Criminal Justice Today 13th Edition PDF?

        -

        If you want to buy Criminal Justice Today 13th Edition PDF online, you have several options to choose from. Here are some of the most popular ones:

        -
          -
        • Pearson: Pearson is the official publisher of Criminal Justice Today 13th Edition. You can buy the PDF version of this textbook from their website for $79.99. You can also access additional online resources such as quizzes, videos, flashcards, and simulations through their Revel platform.
        • -
        • Amazon: Amazon is one of the largest online retailers in the world. You can buy the PDF version of Criminal Justice Today 13th Edition from their website for $59.99. You can also rent the PDF version for $29.99 for 180 days. You can also read the PDF version on your Kindle device or app.
        • -
        • Chegg: Chegg is a leading online platform for students. You can buy or rent the PDF version of Criminal Justice Today 13th Edition from their website for various prices depending on the duration and format. You can also access homework help, tutoring, and study guides through their Chegg Study service.
        • -
        -

        These are just some of the examples of where you can buy Criminal Justice Today 13th Edition PDF online. You may also find other websites that offer this textbook for sale or rent. However, make sure to check their credibility and reviews before making a purchase.

        -

        -

        Conclusion

        -

        Criminal Justice Today 13th Edition is a great textbook for anyone who wants to learn about the criminal justice system in the United States. However, downloading it for free online is illegal and unsafe. The best way to get this textbook online is to buy it from a legitimate source such as Pearson, Amazon, or Chegg. By doing so, you will not only support the author and publisher but also ensure that you get a high-quality and updated version of the textbook that will help you succeed in your studies.

        -```

        7b8c122e87
        -
        -
        \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/GameGuru Mega Pack 3 Crack Highly Compressed TOP.md b/spaces/stomexserde/gpt4-ui/Examples/GameGuru Mega Pack 3 Crack Highly Compressed TOP.md deleted file mode 100644 index d37500f0798016be2d777c7b1dca44a1a5aea0d3..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/GameGuru Mega Pack 3 Crack Highly Compressed TOP.md +++ /dev/null @@ -1,27 +0,0 @@ - -

        How to Download GameGuru Mega Pack 3 Crack Highly Compressed

        -

        If you are looking for a way to download GameGuru Mega Pack 3 crack highly compressed, you have come to the right place. GameGuru Mega Pack 3 is a DLC for GameGuru Classic, a game creation software that allows you to make your own games easily and quickly. GameGuru Mega Pack 3 contains over 400 game ready entities, including war torn buildings, industrial machinery, medical items, sports equipment, and much more. You can use these assets to create your own games and sell them legally.

        -

        However, if you want to try GameGuru Mega Pack 3 for free, you might be tempted to download a cracked version of it from the internet. But beware, downloading cracked software can be risky and illegal. You might end up with viruses, malware, or legal issues. That's why we recommend you to download GameGuru Mega Pack 3 crack highly compressed from our trusted and secure link. This way, you can enjoy GameGuru Mega Pack 3 without any hassle or worry.

        -

        GameGuru Mega Pack 3 Crack Highly Compressed


        Download ===== https://urlgoal.com/2uI7mE



        -

        How to Download GameGuru Mega Pack 3 Crack Highly Compressed

        -

        To download GameGuru Mega Pack 3 crack highly compressed, follow these simple steps:

        -
          -
        1. Click on the download button below to go to our secure link.
        2. -
        3. Complete a quick verification to prove that you are not a robot.
        4. -
        5. Download the GameGuru Mega Pack 3 crack highly compressed file.
        6. -
        7. Extract the file using WinRAR or any other software that can handle ZIP files.
        8. -
        9. Run the setup.exe file and follow the instructions to install GameGuru Mega Pack 3 crack highly compressed.
        10. -
        11. Enjoy creating your own games with GameGuru Mega Pack 3!
        12. -
        -

        That's it! You have successfully downloaded GameGuru Mega Pack 3 crack highly compressed. Now you can unleash your creativity and make your own games with hundreds of assets at your disposal. You can also check out our other posts for more tips and tricks on how to use GameGuru Classic and its DLCs.

        -

        Why Choose GameGuru Mega Pack 3 Crack Highly Compressed

        -

        You might be wondering why you should choose GameGuru Mega Pack 3 crack highly compressed over other options. Here are some of the benefits of downloading GameGuru Mega Pack 3 crack highly compressed from our link:

        -
          -
        • You will save time and space. The original size of GameGuru Mega Pack 3 is about 4 GB, but our crack highly compressed file is only about 1 GB. That means you will download it faster and use less storage space on your device.
        • -
        • You will get access to all the features of GameGuru Mega Pack 3. Our crack highly compressed file is fully functional and does not have any limitations or restrictions. You can use all the assets and tools of GameGuru Mega Pack 3 without any problem.
        • -
        • You will be safe and secure. Our link is verified and tested by many users who have downloaded GameGuru Mega Pack 3 crack highly compressed successfully. We do not host any malicious or illegal files on our server. You can download GameGuru Mega Pack 3 crack highly compressed with confidence and peace of mind.
        • -
        -

        So what are you waiting for? Download GameGuru Mega Pack 3 crack highly compressed today and start making your own games with ease!

        -

        e93f5a0c3f
        -
        -
        \ No newline at end of file diff --git a/spaces/sub314xxl/MetaGPT/tests/metagpt/document_store/test_milvus_store.py b/spaces/sub314xxl/MetaGPT/tests/metagpt/document_store/test_milvus_store.py deleted file mode 100644 index 1cf65776dfad0c612104aec2c09c377e6c2003e1..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/tests/metagpt/document_store/test_milvus_store.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/6/11 21:08 -@Author : alexanderwu -@File : test_milvus_store.py -""" -import random - -import numpy as np - -from metagpt.document_store.milvus_store import MilvusConnection, MilvusStore -from metagpt.logs import logger - -book_columns = {'idx': int, 'name': str, 'desc': str, 'emb': np.ndarray, 'price': float} -book_data = [ - [i for i in range(10)], - [f"book-{i}" for i in range(10)], - [f"book-desc-{i}" for i in range(10000, 10010)], - [[random.random() for _ in range(2)] for _ in range(10)], - [random.random() for _ in range(10)], -] - - -def test_milvus_store(): - milvus_connection = MilvusConnection(alias="default", host="192.168.50.161", port="30530") - milvus_store = MilvusStore(milvus_connection) - milvus_store.drop('Book') - milvus_store.create_collection('Book', book_columns) - milvus_store.add(book_data) - milvus_store.build_index('emb') - milvus_store.load_collection() - - results = milvus_store.search([[1.0, 1.0]], field='emb') - logger.info(results) - assert results diff --git a/spaces/sukiru/BlueArchiveTTS/transforms.py b/spaces/sukiru/BlueArchiveTTS/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/sukiru/BlueArchiveTTS/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = 
F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - 
root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Aveyond Rhens Quest Free Download Full 22 VERIFIED.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Aveyond Rhens Quest Free Download Full 22 VERIFIED.md deleted file mode 100644 index dc90e4566e0320d35c85193093d2573f4660f032..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Aveyond Rhens Quest Free Download Full 22 VERIFIED.md +++ /dev/null @@ -1,20 +0,0 @@ -
        -

        How to Download Aveyond 1: Rhen's Quest for Free

        -

        If you are a fan of classic RPGs like Final Fantasy, Chrono Trigger, and Dragon Warrior, you might want to check out Aveyond 1: Rhen's Quest. This game is the first installment in the popular Aveyond series, which follows the adventures of Rhen, a young girl who discovers she is the chosen one to stop the evil deity Ahriman from destroying the world.

        -

        Aveyond 1: Rhen's Quest is packed with more than 60 quests, hundreds of places to explore, and over 30 hours of gameplay. You can fight monsters, learn sword magic, join a guild, get married, and more. You can also choose from different endings depending on your choices and actions throughout the game.

        -

        aveyond rhen's quest free download full 22


Download Zip: https://cinurl.com/2uEYmR



        -

        But how can you download Aveyond 1: Rhen's Quest for free? Well, there are a few ways to do that. Here are some of them:

        -
          -
        • Download the trial version from the official website. The trial version lets you play for one hour, which is enough to get a taste of the game. If you like it, you can buy the full version for $19.99[^1^].
        • -
        • Download the full version from a third-party website. There are some websites that offer free downloads of Aveyond 1: Rhen's Quest, but be careful as they might contain viruses or malware. Always scan your files before opening them and use a reliable antivirus software.
        • -
        • Download the full version from a torrent site. Torrent sites are another option to get free downloads of Aveyond 1: Rhen's Quest, but they also come with risks. You might download fake or corrupted files, or get into trouble with the law for piracy. Torrenting is illegal in some countries and regions, so make sure you know the laws before you do it.
        • -
        -

        As you can see, there are some ways to download Aveyond 1: Rhen's Quest for free, but they are not without drawbacks. The best and safest way to enjoy this game is to buy it from the official website or a trusted online store. That way, you can support the developers and get updates and bug fixes.

        -

        Aveyond 1: Rhen's Quest is a great game that will keep you entertained for hours. If you love RPGs, you should definitely give it a try. You can download it from here[^1^] or here[^2^]. Have fun!

        - -

        If you want to learn more about Aveyond 1: Rhen's Quest, you can visit the Aveyond Wiki, where you can find detailed information on the game's story, characters, quests, items, and more. You can also join the Aveyond Forums, where you can chat with other fans, share tips and tricks, and get help if you are stuck.

        -

        Aveyond 1: Rhen's Quest is a game that will appeal to both old-school and new RPG lovers. It has a rich and immersive world, a captivating plot, a diverse and memorable cast of characters, and a lot of replay value. Whether you want to save the world from Ahriman, join a guild, get married, or just explore the lands of Aia, you will find something to enjoy in this game.

        -

        So what are you waiting for? Download Aveyond 1: Rhen's Quest for free today and start your adventure!

        -

        d5da3c52bf
        -
        -
        \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Imperial Reckoning The Untold Story Of Britains Gulag In Kenya Free Download Fixed.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Imperial Reckoning The Untold Story Of Britains Gulag In Kenya Free Download Fixed.md deleted file mode 100644 index 7bba807ecedfbc1309787f1865dd8eb3bc6d969c..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Imperial Reckoning The Untold Story Of Britains Gulag In Kenya Free Download Fixed.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Imperial Reckoning: The Untold Story of Britain's Gulag in Kenya free download


        Download Zip ✏ ✏ ✏ https://cinurl.com/2uEYi4



- -As part of the allied forces, thousands of Kenyans fought for the British in World War II. But just a few years after the defeat of Hitler, the British tried to restore their colonial empire, but met with fierce resistance from both former subjects and African peoples. By the end of the war, there were more than 300,000 refugees in Kenya from British East Africa (now Kenya). 8a78ff9644
        -
        -
        -

        diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Official Motorola Moto G7 XT1962-6 (RIVER) Stock Rom [PATCHED].md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Official Motorola Moto G7 XT1962-6 (RIVER) Stock Rom [PATCHED].md deleted file mode 100644 index 7f7e491c9431098e1f2e5bda96c6a41e104de251..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Official Motorola Moto G7 XT1962-6 (RIVER) Stock Rom [PATCHED].md +++ /dev/null @@ -1,11 +0,0 @@ -

        Official Motorola Moto G7 XT1962-6 (RIVER) Stock Rom


Download File: https://cinurl.com/2uEYsP



        -
-March 21, 2019 - Is this the original firmware with a factory signature? (i.e. is it possible to completely flash and re-lock the bootloader?) Just wondering if anyone has the latest kernel (falconfs). -What is Android phone firmware and why is it needed? -How to roll back the firmware on Android and return the previous version? -To understand this issue 8a78ff9644
        -
        -
        -

        diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Undisputed123720pindualaudio16.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Undisputed123720pindualaudio16.md deleted file mode 100644 index 7ebb01910cbaaf2a61fc5da457f9f92f55c03295..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Undisputed123720pindualaudio16.md +++ /dev/null @@ -1,123 +0,0 @@ -
        -

        How to Download or Stream Undisputed 1 2 3 720p in Dual Audio

        - -

        Are you looking for a way to watch the thrilling action movies Undisputed 1 2 3 in high definition and dual audio? If yes, then you have come to the right place. In this article, we will show you how to download or stream Undisputed 1 2 3 720p in dual audio, which means you can enjoy the movies in both English and Hindi languages.

        -

        undisputed123720pindualaudio16


        Download ✔✔✔ https://cinurl.com/2uEXJs



        - -

        Undisputed 1 2 3 are a series of movies that focus on underground prison fighting tournaments, where the best fighters from different prisons compete for the title of the undisputed champion. The movies feature some of the best martial artists and actors in the world, such as Wesley Snipes, Ving Rhames, Michael Jai White, and Scott Adkins. The movies are known for their realistic and brutal fight scenes, as well as their compelling stories of redemption, honor, and survival.

        - -

        Why should you watch Undisputed 1 2 3 720p in dual audio?

        - -

        There are many benefits of watching Undisputed 1 2 3 720p in dual audio. Here are some of them:

        - -
          -
        • You can watch the movies in high definition quality, which enhances the visual effects and details of the fight scenes.
        • -
        • You can choose between English and Hindi languages, which gives you more options and flexibility.
        • -
        • You can appreciate the different accents and expressions of the actors, which adds more flavor and realism to the characters.
        • -
        • You can learn some new words and phrases in both languages, which can improve your vocabulary and communication skills.
        • -
        • You can have more fun and excitement watching the movies with your friends and family who speak different languages.
        • -
        - -

        How to download or stream Undisputed 1 2 3 720p in dual audio?

        - -

        There are many ways to download or stream Undisputed 1 2 3 720p in dual audio. You can use various websites that offer this service. However, you need to be careful about the quality and safety of these websites. Some of them may have low-quality videos or malicious ads that can harm your device or data.

        - -

That's why we recommend using our website, which is one of the best and most reliable sources for downloading or streaming Undisputed 1 2 3 720p in dual audio. Our website has several features that make it stand out from other websites:

        - -
          -
        • We have high-quality videos that are clear and smooth.
        • -
        • We have fast and secure servers that ensure uninterrupted downloading or streaming.
        • -
        • We have no annoying ads or pop-ups that disturb your viewing experience.
        • -
• We have an easy and user-friendly interface that allows you to navigate and search for your favorite movies.
        • -
        • We have regular updates and new releases that keep you entertained and satisfied.
        • -
        - -

        So what are you waiting for? Visit our website today and download or stream Undisputed 1 2 3 720p in dual audio. You will not regret it!


        What are the reviews and ratings of Undisputed 1 2 3 720p in dual audio?

        - -

        Undisputed 1 2 3 720p in dual audio have received positive reviews and ratings from critics and audiences alike. The movies have been praised for their action, drama, and entertainment value. Here are some of the reviews and ratings of Undisputed 1 2 3 720p in dual audio:

        -

        - -
          -
        • Undisputed (2002) has a rating of 6.2 out of 10 on IMDb, based on over 28,000 votes. It has a rating of 48% on Rotten Tomatoes, based on 69 reviews. It has a rating of 58 out of 100 on Metacritic, based on 26 reviews.
        • -
• Undisputed II: Last Man Standing (2006) has a rating of 7.1 out of 10 on IMDb, based on over 35,000 votes. It has a rating of 100% on Rotten Tomatoes, based on 5 reviews. Its Metacritic score is not available.
        • -
• Undisputed III: Redemption (2010) has a rating of 7.4 out of 10 on IMDb, based on over 36,000 votes. Its Rotten Tomatoes and Metacritic scores are not available.
        • -
        - -

        As you can see, Undisputed 1 2 3 720p in dual audio have received mostly positive feedback from the viewers and critics. The movies have been appreciated for their thrilling and realistic fight scenes, as well as their engaging and inspiring stories.

        - -

        How to watch Undisputed 1 2 3 720p in dual audio offline?

        - -

        If you want to watch Undisputed 1 2 3 720p in dual audio offline, you can download them from our website and save them on your device. This way, you can watch the movies anytime and anywhere you want, without any internet connection or buffering issues.

        - -

        To download Undisputed 1 2 3 720p in dual audio from our website, you need to follow these simple steps:

        - -
          -
        1. Visit our website and search for Undisputed 1 2 3 720p in dual audio.
        2. -
        3. Select the movie you want to download and click on the download button.
        4. -
        5. Choose the quality and format you want to download and click on the confirm button.
        6. -
        7. Wait for the download to finish and enjoy watching the movie offline.
        8. -
        - -

        Downloading Undisputed 1 2 3 720p in dual audio from our website is easy and fast. You can download the movies in various qualities and formats, such as MP4, MKV, AVI, etc. You can also choose the language you want to download, such as English or Hindi.

        - -

        So don't wait any longer and download Undisputed 1 2 3 720p in dual audio from our website today. You will love watching these amazing action movies offline.

        -

        What are the best scenes of Undisputed 1 2 3 720p in dual audio?

        - -

        Undisputed 1 2 3 720p in dual audio have many memorable and exciting scenes that will keep you on the edge of your seat. Here are some of the best scenes of Undisputed 1 2 3 720p in dual audio:

        - -
          -
        • The final fight between Monroe Hutchen (Wesley Snipes) and George "Iceman" Chambers (Ving Rhames) in Undisputed (2002). This is the ultimate showdown between two legendary boxers who have different styles and personalities. The fight is intense, brutal, and unpredictable, as both men give their all to prove who is the undisputed champion of the prison.
        • -
        • The first fight between George "Iceman" Chambers (Michael Jai White) and Yuri Boyka (Scott Adkins) in Undisputed II: Last Man Standing (2006). This is the fight that introduces Boyka as the most complete fighter in the world, who can use any martial art and strike from any angle. Chambers is caught off guard by Boyka's speed, agility, and power, and suffers a humiliating defeat. The fight showcases Boyka's signature moves, such as the spinning hook kick and the flying knee.
        • -
        • The final fight between Yuri Boyka (Scott Adkins) and Dolor (Marko Zaror) in Undisputed III: Redemption (2010). This is the fight that determines who will win the freedom and the glory of the international fighting competition. Boyka faces his toughest opponent yet, Dolor, who is a giant and a savage fighter who enjoys inflicting pain. The fight is a clash of titans, as both men unleash their fury and skill. The fight also has an emotional twist, as Boyka dedicates his victory to Turbo (Mykel Shannon Jenkins), his friend and ally who was killed by Dolor.
        • -
        - -

        These are some of the best scenes of Undisputed 1 2 3 720p in dual audio that will make you cheer, gasp, and applaud. You can watch these scenes and more by downloading or streaming Undisputed 1 2 3 720p in dual audio from our website.

        - -

        What are the benefits of watching Undisputed 1 2 3 720p in dual audio?

        - -

        Watching Undisputed 1 2 3 720p in dual audio is not only entertaining, but also beneficial for your health and well-being. Here are some of the benefits of watching Undisputed 1 2 3 720p in dual audio:

        - -
          -
        • You can improve your physical fitness by watching the fight scenes and learning from the fighters' techniques, movements, and stamina. You can also get inspired by their dedication, discipline, and determination to overcome their challenges.
        • -
        • You can improve your mental health by watching the stories and characters of the movies and relating to their struggles, emotions, and motivations. You can also get motivated by their courage, resilience, and growth.
        • -
        • You can improve your social skills by watching the movies with your friends and family who speak different languages and sharing your opinions, insights, and feelings. You can also learn from their perspectives, cultures, and values.
        • -
        - -

        Watching Undisputed 1 2 3 720p in dual audio is a great way to have fun, learn, and grow. You can watch these movies by downloading or streaming them from our website.

        -

        What are the cast and crew of Undisputed 1 2 3 720p in dual audio?

        - -

        Undisputed 1 2 3 720p in dual audio have a talented and diverse cast and crew who have worked hard to bring these movies to life. Here are some of the cast and crew of Undisputed 1 2 3 720p in dual audio:

        - -
          -
        • Undisputed (2002) was directed by Walter Hill, who is known for his action and western movies, such as The Warriors, 48 Hrs., and Last Man Standing. The screenplay was written by David Giler and Walter Hill, based on a story by David Giler. The movie stars Wesley Snipes as Monroe Hutchen, Ving Rhames as George "Iceman" Chambers, Peter Falk as Mendy Ripstein, Michael Rooker as A.J. Mercker, Jon Seda as Jesus "Chuy" Campos, Wes Studi as Mingo Pace, Fisher Stevens as James "Ratbag" Kroycek, and Master P as Gat Boyz Rapper 1.
        • -
        • Undisputed II: Last Man Standing (2006) was directed by Isaac Florentine, who is known for his martial arts and action movies, such as Ninja, Ninja: Shadow of a Tear, and Close Range. The screenplay was written by James Townsend and David N. White, based on characters created by David Giler and Walter Hill. The movie stars Michael Jai White as George "Iceman" Chambers, Scott Adkins as Yuri Boyka, Eli Danker as Crot, Ben Cross as Steven Parker, Mark Ivanir as Gaga, Ken Lerner as Phil Goldstein, Daisy Lang as Svetlana, Silvio Simac as Davic, and Ivaylo Geraskov as Alexi.
        • -
        • Undisputed III: Redemption (2010) was also directed by Isaac Florentine and written by David N. White, based on characters created by David Giler and Walter Hill. The movie stars Scott Adkins as Yuri Boyka, Mykel Shannon Jenkins as Turbo Smith, Mark Ivanir as Gaga, Hristo Shopov as Warden Kuss, Marko Zaror as Dolor Quinones, Michael Baral as Casino Manager Saul Mamani, Ilram Choi as The Asian Fighter Sykov, Robert Costanzo as Farnatti, Lateef Crowder as Santiago Silva, Esteban Cueto as Victor Ortiz Calderon, Vernon Dobtcheff as Rezo Pogodin.
        • -
        - -

        These are some of the cast and crew of Undisputed 1 2 3 720p in dual audio who have contributed to the success and popularity of these movies. You can watch their performances and work by downloading or streaming Undisputed 1 2 3 720p in dual audio from our website.

        - -

        What are the trivia and facts of Undisputed 1 2 3 720p in dual audio?

        - -

        Undisputed 1 2 3 720p in dual audio have many interesting trivia and facts that you may not know. Here are some of them:

        - -
          -
        • Undisputed (2002) was inspired by the real-life story of Mike Tyson, who was convicted of rape in 1992 and served three years in prison. Tyson was also challenged by several inmates to fight for the prison's championship title.
        • -
        • Undisputed II: Last Man Standing (2006) was originally intended to be a direct sequel to Undisputed (2002), with Wesley Snipes and Ving Rhames reprising their roles. However, due to Snipes' tax evasion charges and Rhames' salary demands, the producers decided to recast the roles with Michael Jai White and Scott Adkins.
        • -
        • Undisputed III: Redemption (2010) was the first movie to feature Scott Adkins as the main protagonist. Adkins had previously played supporting or villainous roles in other movies. Adkins also performed most of his own stunts and fight choreography in the movie.
        • -
        - -

        These are some of the trivia and facts of Undisputed 1 2 3 720p in dual audio that will make you appreciate these movies more. You can learn more about these movies by downloading or streaming Undisputed 1 2 3 720p in dual audio from our website.

        -

        Conclusion

        - -

        Undisputed 1 2 3 720p in dual audio are some of the best action movies that you can watch. They have amazing fight scenes, captivating stories, and impressive cast and crew. They also have many benefits, such as improving your physical fitness, mental health, and social skills. They also have many trivia and facts that will make you enjoy them more.

        - -

        If you want to watch Undisputed 1 2 3 720p in dual audio, you can download or stream them from our website. Our website is the best and most reliable source for these movies. We have high-quality videos, fast and secure servers, no annoying ads or pop-ups, easy and user-friendly interface, and regular updates and new releases.

        - -

        So don't miss this opportunity and visit our website today. You will love watching Undisputed 1 2 3 720p in dual audio. They are the ultimate action movies that will keep you entertained and satisfied.

        -
        -
        \ No newline at end of file diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Adobe Premiere Pro CC 2018 V12.0 X64 Full With Crack Serial Key Keygen.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Adobe Premiere Pro CC 2018 V12.0 X64 Full With Crack Serial Key Keygen.md deleted file mode 100644 index b461ff255cc326fce343360b9cbc3aa8d5c0c041..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Adobe Premiere Pro CC 2018 V12.0 X64 Full With Crack Serial Key Keygen.md +++ /dev/null @@ -1,11 +0,0 @@ -

        Adobe Premiere Pro CC 2018 v12.0 x64 Full with Crack Serial Key keygen


        Download 🌟 https://urluss.com/2uCGCm



        - -Dec 15, 2017 - You can quickly and easily set the encoding bitrate, target bitrate and maximum bitrate. Adobe Media Encoder CC 2018 with patch and activation pack has ... Adobe Media Encoder CC 2018 with patch and activation pack has only 2 versions unlike previous versions. -Adobe Media Encoder CC 2018 - Download. -Adobe Media Encoder - download Adobe Media Encoder CC 2018 21.1.0.38 for free. -Adobe Media Encoder - A program designed for video encoding. -The advantages of the software can be attributed to the high speed of work, as well as support for the most modern technologies. -Here you can also find additional tools for working with frames. 8a78ff9644
        -
        -
        -

        diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/AdobeCreativeCloudCC2018ColllectionForPCSerialKeykeygen !NEW!.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/AdobeCreativeCloudCC2018ColllectionForPCSerialKeykeygen !NEW!.md deleted file mode 100644 index dd086006e4d54d153cc33fdc4142910ba2f6f9f8..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/AdobeCreativeCloudCC2018ColllectionForPCSerialKeykeygen !NEW!.md +++ /dev/null @@ -1,18 +0,0 @@ -

        AdobeCreativeCloudCC2018ColllectionForPCSerialKeykeygen


        DOWNLOADhttps://urluss.com/2uCFZY



        -
        -If you’re looking to create a custom key pair, the process is a bit more complicated and involves a couple of extra steps. Please see our guide to creating a custom key pair for more information.Safety and tolerability of the fixed combination of roflumilast, montelukast, and theophylline in patients with asthma and chronic obstructive pulmonary disease (PIERCE): results from a phase 2, multicentre, open-label trial. - -To evaluate the tolerability of the oral fixed combination of roflumilast, montelukast and theophylline in patients with asthma and chronic obstructive pulmonary disease (COPD). A phase 2, open-label, multicentre, multi-arm trial. Randomised patients (N = 926) from 22 sites were treated with the fixed-dose combination of roflumilast (500 μg twice daily), montelukast (10 mg once daily) and theophylline (200 mg twice daily) for 28 days. At baseline, there were more men and patients aged ≥ 65 years, and more patients with asthma and COPD were receiving inhaled corticosteroids and other maintenance therapy. During the treatment period, all patients (n = 906) had asthma (n = 534) or COPD (n = 369) as confirmed by clinical and spirometric assessment. Overall, patients reported only mild adverse events, the most common being headache (6.7%), fatigue (6.3%), nausea (5.1%) and diarrhoea (3.2%). Patients with COPD reported more adverse events than patients with asthma (48.6% vs. 43.9%). Treatment with the fixed-dose combination of roflumilast, montelukast and theophylline in this study was well tolerated in patients with asthma and COPD. There were no clinically meaningful differences between the safety profiles of the combination in patients with COPD and those with asthma.Q: - -How to make dropdown menu bigger? - -I have one html page that has a dropdown menu. I wanted it to be bigger, like there would be two rows of options, but when I put overflow:hidden on it, the dropdown menu become to small. - -I tried like this but the dropdown becomes to small. - - - - < 4fefd39f24
        -
        -
        -

        diff --git a/spaces/svjack/ControlNet-Face-Chinese/SPIGA/spiga/eval/benchmark/metrics/__init__.py b/spaces/svjack/ControlNet-Face-Chinese/SPIGA/spiga/eval/benchmark/metrics/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/core/seg/sampler/base_pixel_sampler.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/core/seg/sampler/base_pixel_sampler.py deleted file mode 100644 index b75b1566c9f18169cee51d4b55d75e0357b69c57..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/core/seg/sampler/base_pixel_sampler.py +++ /dev/null @@ -1,12 +0,0 @@ -from abc import ABCMeta, abstractmethod - - -class BasePixelSampler(metaclass=ABCMeta): - """Base class of pixel sampler.""" - - def __init__(self, **kwargs): - pass - - @abstractmethod - def sample(self, seg_logit, seg_label): - """Placeholder for sample function.""" diff --git a/spaces/szukevin/VISOR-GPT/train/tencentpretrain/__init__.py b/spaces/szukevin/VISOR-GPT/train/tencentpretrain/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/tanishqvashisht/catVsDog/app.py b/spaces/tanishqvashisht/catVsDog/app.py deleted file mode 100644 index d84163662c297560cd80b56ea77385de5c40943e..0000000000000000000000000000000000000000 --- a/spaces/tanishqvashisht/catVsDog/app.py +++ /dev/null @@ -1,83 +0,0 @@ -import streamlit as st -from PIL import Image -import torch -from torch import nn as nn -from torchvision.transforms import transforms - - - -class CNNModel(nn.Module): - def __init__(self): - super(CNNModel, self).__init__() - self.conv_layers = nn.Sequential( - nn.Conv2d(3, 32, kernel_size=3, padding=0), - nn.ReLU(), - nn.BatchNorm2d(32), - nn.MaxPool2d(kernel_size=2, stride=2), - nn.Conv2d(32, 64, kernel_size=3, padding=0), - nn.ReLU(), - nn.BatchNorm2d(64), - nn.MaxPool2d(kernel_size=2, stride=2), - nn.Conv2d(64, 128, kernel_size=3, padding=0), - nn.ReLU(), - nn.BatchNorm2d(128), - nn.MaxPool2d(kernel_size=2, stride=2) - ) - self.fc_layers = nn.Sequential( - nn.Flatten(), - nn.Linear(128 * 30 * 30, 128), - nn.ReLU(), - nn.Dropout(0.1), - nn.Linear(128, 64), - nn.ReLU(), - nn.Dropout(0.1), - nn.Linear(64, 1), - nn.Sigmoid() - ) - - def forward(self, x): - x = self.conv_layers(x) - x = self.fc_layers(x) - return x - - -def load_checkpoint(checkpoint, model): - print("=> Loading checkpoint") - model.load_state_dict(checkpoint["state_dict"]) - - -model = CNNModel().to("cpu") -load_checkpoint(torch.load("model.pth.tar", map_location=torch.device('cpu')), model) -model.eval() -class_names = ["cat", "dog"] - -st.title("Cat vs Dog Classifier") -st.write("Upload an image and let the model predict whether it's a cat or a dog!") - -uploaded_image = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"]) - -if uploaded_image is not None: - image = Image.open(uploaded_image) - st.image(image, caption="Uploaded Image", use_column_width=True) - - # Define the transformation to convert the image to a tensor - transform = transforms.Compose([ - transforms.Resize((256, 256)), - transforms.ToTensor(), - transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - ]) - - # Apply the transformation to the image - tensor_image = transform(image) - - # Predict - predictions = model(tensor_image.unsqueeze(0)) - 
predicted_class_index = torch.argmax(predictions).item() - predicted_class = class_names[predicted_class_index] - - val = torch.max(predictions) - if val > 0.5: - text = "dog" - else: - text = "cat" - st.write(f"Prediction: {text} with confidence {val:.2f}") diff --git a/spaces/terfces0erbo/CollegeProjectV2/Adobe Photoshop Lightroom CC 2019 V2.0.1 Crack.md b/spaces/terfces0erbo/CollegeProjectV2/Adobe Photoshop Lightroom CC 2019 V2.0.1 Crack.md deleted file mode 100644 index 0a7d2d3a31ab053dcf6515dab5b6cd5082a3a09d..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Adobe Photoshop Lightroom CC 2019 V2.0.1 Crack.md +++ /dev/null @@ -1,14 +0,0 @@ -

        Adobe Photoshop Lightroom CC 2019 v2.0.1 Crack


        Download ★★★ https://bytlly.com/2uGkLW



        - -Adobe After Effects CC (2019) v16.0.1.48 (x64) ... Adobe Lightroom CC (2019) 2.0.1 (x64) ... Adobe Photoshop Lightroom Classic CC (2019) 8.1 (x64). Soft Portal. -Graphics software / Adobe Photoshop ... -Adobe After Effects CC 2019 download torrent in Russian ... -Adobe After Effects CC 2019 is a popular video and motion picture editing software that... -Adobe After Effects CC ... -Adobe Photoshop CC 2019 | Free download -Adobe Photoshop CC 2019 is the latest version of the raster graphics processing program. -Many graphic... -Adobe After Effects CC 2019 torrent download for Windows... 8a78ff9644
        -
        -
        -

        diff --git a/spaces/terfces0erbo/CollegeProjectV2/Beachhead 2000 Activation Code And Serial Number.md b/spaces/terfces0erbo/CollegeProjectV2/Beachhead 2000 Activation Code And Serial Number.md deleted file mode 100644 index b6b400833c466c88a408b81296a6418021db0578..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Beachhead 2000 Activation Code And Serial Number.md +++ /dev/null @@ -1,30 +0,0 @@ - -

        How to Get Beachhead 2000 Activation Code and Serial Number

        -

        Beachhead 2000 is a first-person shooter game that was released in 2000 by Digital Fusion. The game is set in a futuristic war scenario where the player has to defend a beach from waves of enemy soldiers, tanks, helicopters, and other vehicles. The game requires an activation code and a serial number to run.

        -

        If you have lost or forgotten your activation code and serial number for Beachhead 2000, you may be able to find them in one of the following ways:

        -

        Beachhead 2000 activation code and serial number


        Download Zip ->>->>->> https://bytlly.com/2uGlkR



        -
          -
        • Check the original CD case or manual that came with the game. The activation code and serial number may be printed on a sticker or a card inside.
        • -
        • Check your email inbox for a confirmation message from Digital Fusion or the online retailer that sold you the game. The activation code and serial number may be included in the email.
        • -
        • Contact Digital Fusion's customer support and provide them with proof of purchase, such as a receipt or an order number. They may be able to retrieve your activation code and serial number for you.
        • -
        -

        However, if you do not have a legitimate copy of Beachhead 2000, you should not try to obtain an activation code and serial number illegally. Doing so may expose your computer to malware, viruses, or other security risks. It may also violate the terms of service and the intellectual property rights of Digital Fusion and its partners. Therefore, we recommend that you purchase a legal copy of Beachhead 2000 from a reputable source if you want to enjoy the game safely and legally.

        Beachhead 2000 is a challenging and addictive game that will test your reflexes and strategic skills. The game features realistic graphics, sound effects, and physics that create an immersive experience. You can choose from different difficulty levels, weapons, and modes to customize your gameplay. You can also play with or against other players online or on a local network.

        -

        Beachhead 2000 is compatible with Windows 95, 98, ME, 2000, and XP. The game requires a Pentium II 266 MHz processor or higher, 32 MB of RAM or more, a DirectX 7.0 compatible video card with 8 MB of VRAM or more, a DirectX 7.0 compatible sound card, a CD-ROM drive, and a mouse. The game also requires an internet connection for online play and activation.

        -

        If you are a fan of first-person shooter games and want to experience the thrill of defending a beach from relentless enemies, you should give Beachhead 2000 a try. You can buy the game from Digital Fusion's website or from other online retailers. You can also download a free demo version of the game from various websites. However, make sure you have a valid activation code and serial number before you install the game on your computer.

        Once you have purchased or downloaded Beachhead 2000, you can install the game by following these steps:

        -

        -
          -
        1. Insert the CD-ROM into your CD-ROM drive or locate the downloaded file on your computer.
        2. -
        3. Run the setup program and follow the instructions on the screen.
        4. -
        5. Enter your activation code and serial number when prompted.
        6. -
        7. Wait for the installation to complete and then launch the game from the Start menu or the desktop shortcut.
        8. -
        -

        By buying Beachhead 2000, you will enjoy several benefits, such as:

        -
          -
        • Access to all the features and modes of the game, including online multiplayer.
        • -
        • Support from Digital Fusion's customer service and technical support team.
        • -
        • Updates and patches for the game to fix any bugs or issues.
        • -
        • The satisfaction of supporting the developers and publishers of the game.
        • -
        -

        If you need any help with Beachhead 2000, you can contact Digital Fusion's customer support by visiting their website or sending them an email. You can also check their FAQ section or their online forum for answers to common questions and problems. You can also read reviews and tips from other players online or in magazines.

        -
        -
        \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/CRACK Poedit Pro 2.1 Build 5407 Multilingual Full With Medicine[BabuPC !NEW!.md b/spaces/terfces0erbo/CollegeProjectV2/CRACK Poedit Pro 2.1 Build 5407 Multilingual Full With Medicine[BabuPC !NEW!.md deleted file mode 100644 index b852742416f9d203845cc6bb06d7594c82812494..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/CRACK Poedit Pro 2.1 Build 5407 Multilingual Full With Medicine[BabuPC !NEW!.md +++ /dev/null @@ -1,6 +0,0 @@ -

        CRACK Poedit Pro 2.1 Build 5407 Multilingual Full With Medicine[BabuPC


        Download ––– https://bytlly.com/2uGlu9



        -
        -Forza forza4.8 r5.01 Full Crack Cabirriio. Forza cabirriio 4.8 r5.01 Crack Forza Sion Version Forza Cabirriio Editor Pro Crack Download Full Version from here.1Download forza cabirriio 4.8 r5.01 crack Free. 1.00 MB. Cabirriio. 4.8 r5.01 Crack. Forza Cabirriio. 4.8 R5.01 Forza Cabirriio Editor Pro. Forza Cabirriio Editor Pro.Download Latest Cabirriio. 4.8 Forza Forza Cabirriio Editor Pro Download Forza Cabirriio Editor Pro Crack Full Version is an amazing and easy-to-use software for authors, editors, and translators.FORZA FORZA 4.8 R5.01 FULL CRACK + PATCH for PC use only. In addition.Il Forza Cabirriio Editor Pro Crack Cabirriio Crack for Mac No Survey. Forza Cabirriio Editor Pro Crack.Zip. Cabirriio. 4.8 Forza Forza Cabirriio Editor Pro Download Cabirriio Editor Pro Crack.Compatible with all editions and all languages. [FULL-FREE] FORZA FORZA 4.8 R5.01 FULL CRACK + PATCH For PC use only. The best CCN for di-electronics, communication, utilities, News, lifestyle, sports, environmental and health topics. An essential reference for IT professionals. [FULL-FREE] Forza Cabirriio Editor Pro Crack Cabirriio Editor Pro Crack Cabirriio Editor Pro Crack is an amazing and easy-to-use software for authors, editors, and translators.Editori e Pubblicità per il Mondo di Cabirriio 4.8.Forza Cabirriio Editor Pro Crack Cabirriio Editor Pro 4.8 Forza Cabirriio Editor Pro Crack. Cabirriio Cabirriio Editor Pro Crack download for free. The best CCN for di-electronics, communication, utilities, News, lifestyle, sports, environmental and health topics. Cabirriio Cabirriio Editor Pro Crack is an amazing and easy-to-use software for authors, editors, and translators.FREE Forza cabirriio 4.8 R5.01. This is a new version 4fefd39f24
        -
        -
        -

        diff --git a/spaces/terfces0erbo/CollegeProjectV2/Dgs Ramsete Iii V9 05.rar __HOT__.md b/spaces/terfces0erbo/CollegeProjectV2/Dgs Ramsete Iii V9 05.rar __HOT__.md deleted file mode 100644 index c7f8ceeff539685059f1589f98fa6a010da88add..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Dgs Ramsete Iii V9 05.rar __HOT__.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Dgs Ramsete Iii V9 05.rar


        Download ►►► https://bytlly.com/2uGj12



- -
        -
        -
        -

        diff --git a/spaces/thebestteamever/fire_detection_project/README.md b/spaces/thebestteamever/fire_detection_project/README.md deleted file mode 100644 index 8a75678f4be40c6cd021e5b289d96f979d1b3507..0000000000000000000000000000000000000000 --- a/spaces/thebestteamever/fire_detection_project/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Fire_detection_project -emoji: 💩 -colorFrom: indigo -colorTo: red -sdk: gradio -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/thejagstudio/procom/croma/apps.py b/spaces/thejagstudio/procom/croma/apps.py deleted file mode 100644 index b5413e7937c87ee8e7af6e6a8521285a58cd22d8..0000000000000000000000000000000000000000 --- a/spaces/thejagstudio/procom/croma/apps.py +++ /dev/null @@ -1,6 +0,0 @@ -from django.apps import AppConfig - - -class CromaConfig(AppConfig): - default_auto_field = "django.db.models.BigAutoField" - name = "croma" diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/How to Get Microsoft Office 365 Pro Plus for Free (No Crack Required).md b/spaces/tialenAdioni/chat-gpt-api/logs/How to Get Microsoft Office 365 Pro Plus for Free (No Crack Required).md deleted file mode 100644 index d0278e6b0cc8c9db91bf64279f2cdb125951302e..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/How to Get Microsoft Office 365 Pro Plus for Free (No Crack Required).md +++ /dev/null @@ -1,36 +0,0 @@ - -

        Microsoft Office 365 Pro Plus: A Comprehensive Guide

        -

        Microsoft Office 365 Pro Plus is a subscription-based service that provides access to the latest version of Microsoft Office applications and cloud services. It is designed for business users who need the most advanced features and functionality of Office 365. In this article, we will explore what Microsoft Office 365 Pro Plus offers, how to download and install it, and what are its benefits and drawbacks.

        -

        microsoft office 365 pro plus free download full version with crack


        DOWNLOAD →→→ https://urlcod.com/2uKakz



        -

        What is Microsoft Office 365 Pro Plus?

        -

        Microsoft Office 365 Pro Plus is one of the plans available in the Microsoft 365 and Office 365 service families. It includes the following components:

        -
          -
        • Office apps: You can install the full versions of Word, Excel, PowerPoint, Outlook, OneNote, Access, Publisher, and Skype for Business on up to five devices per user. You can also use the web and mobile versions of these apps on any device with an internet connection.
        • -
        • Cloud services: You can store and share files online with OneDrive for Business, which offers 1 TB of cloud storage per user. You can also collaborate with others in real time using SharePoint Online, Teams, and Yammer. You can also use Exchange Online for email and calendar, and Power BI for data analysis and visualization.
        • -
        • Security and compliance: You can protect your data and devices with Microsoft Defender, which provides advanced threat protection, identity and access management, information protection, and security management. You can also comply with various regulations and standards using tools such as eDiscovery, Data Loss Prevention, and Audit Logs.
        • -
        • Support and updates: You can get 24/7 phone and web support from Microsoft, as well as access to online resources and training. You can also get the latest features and security updates for Office apps automatically without any additional cost.
        • -
        -

        How to download and install Microsoft Office 365 Pro Plus?

        -

        To download and install Microsoft Office 365 Pro Plus, you need to have a valid subscription and a Microsoft account. You can follow these steps:

        -
          -
        1. Go to office.com and sign in with your Microsoft account.
        2. -
        3. Select Install Office and choose Office 365 apps.
        4. -
        5. A setup file will be downloaded to your device. Run the file and follow the instructions to complete the installation.
        6. -
        7. Activate your Office apps by signing in with your Microsoft account when prompted.
        8. -
        -

        You can also download Office apps individually from the Microsoft Store or use the Office Deployment Tool to customize your installation options.
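For administrators who go the Office Deployment Tool route, the tool is driven by a small XML configuration file passed to setup.exe. The snippet below is only an illustrative sketch of that configuration format; the channel, edition, and language shown are example values, not a recommended setup.

```xml
<!-- Example configuration.xml for the Office Deployment Tool (illustrative values) -->
<Configuration>
  <Add OfficeClientEdition="64" Channel="Current">
    <Product ID="O365ProPlusRetail">
      <Language ID="en-us" />
    </Product>
  </Add>
  <Display Level="Full" AcceptEULA="TRUE" />
</Configuration>
```

With a file like this saved next to setup.exe, running setup.exe /download configuration.xml caches the installation files, and setup.exe /configure configuration.xml performs the install.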

        -

        -

        What are the benefits and drawbacks of Microsoft Office 365 Pro Plus?

        -

        Microsoft Office 365 Pro Plus has many benefits for business users, such as:

        -
          -
        • Productivity: You can work more efficiently and effectively with the most advanced features and functionality of Office apps. You can also access your files and apps from anywhere on any device with an internet connection.
        • -
        • Collaboration: You can communicate and cooperate with your colleagues and partners using cloud services such as Teams, SharePoint Online, and Yammer. You can also co-author documents in real time and share feedback using comments and @mentions.
        • -
        • Security and compliance: You can protect your data and devices with Microsoft Defender, which provides comprehensive security solutions for your organization. You can also comply with various regulations and standards using tools such as eDiscovery, Data Loss Prevention, and Audit Logs.
        • -
        • Support and updates: You can get 24/7 phone and web support from Microsoft, as well as access to online resources and training. You can also get the latest features and security updates for Office apps automatically without any additional cost.
        • -
        -

        However, Microsoft Office 365 Pro Plus also has some drawbacks, such as:

        -
          -
        • Cost: You need to pay a monthly or annual fee to use Microsoft Office 365 Pro Plus, which may be higher than buying a one-time license for Office Home & Student or Office Home & Business. You also need to renew your subscription regularly to keep using the service.
        • -
        • Compatibility:

          -
          -
          \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Introduccionalelectromagnetismopopoviczoya35.md b/spaces/tialenAdioni/chat-gpt-api/logs/Introduccionalelectromagnetismopopoviczoya35.md deleted file mode 100644 index a7bb28a1d2ef2f834f7a597268b11b9316b5e27a..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Introduccionalelectromagnetismopopoviczoya35.md +++ /dev/null @@ -1,105 +0,0 @@ -
          -

          Introduccionalelectromagnetismopopoviczoya35: What is it and why should you care?

          -

          If you are interested in learning about electromagnetism, one of the fundamental forces of nature that governs electricity, magnetism, light, and other phenomena, you might want to check out introduccionalelectromagnetismopopoviczoya35. This is a keyword that refers to a book called Introductory Electromagnetics by Zoya Popovic and Branko D. Popovic, two renowned professors and experts in the field of electromagnetics.

          -

          introduccionalelectromagnetismopopoviczoya35


          Download Filehttps://urlcod.com/2uK9GJ



          -

          Introductory Electromagnetics is a book that relates basic physical principles to engineering practice with a number of application examples. It is mathematically simple, but exact. It contains application chapters that integrate knowledge readers gain in previous chapters. It includes a “questions” feature that guides the reader as they read the book to provide a qualitative understanding of the material. It derives mathematical tools from physical concepts when needed. It presents transmission lines in the middle of the book once readers have understood distributed circuit elements.

          -

          Introductory Electromagnetics is a book that is suitable for engineers who are interested in refreshing their knowledge of electromagnetics, as well as for students who are taking their first course in electromagnetics. It covers topics such as electrostatics, magnetostatics, Maxwell's equations, plane waves, transmission lines, waveguides, antennas, radiation, and microwave circuits.
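As a taste of the mathematical level involved, the differential (point) form of Maxwell's equations, which the plane-wave, transmission-line, and antenna chapters all build on, can be written as:

$$
\begin{aligned}
\nabla \cdot \mathbf{D} &= \rho, & \nabla \cdot \mathbf{B} &= 0,\\
\nabla \times \mathbf{E} &= -\frac{\partial \mathbf{B}}{\partial t}, & \nabla \times \mathbf{H} &= \mathbf{J} + \frac{\partial \mathbf{D}}{\partial t},
\end{aligned}
$$

where D and B are the electric and magnetic flux densities, E and H the corresponding field intensities, ρ the charge density, and J the current density.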

          -

          Who are Zoya Popovic and Branko D. Popovic?

          -

          Zoya Popovic and Branko D. Popovic are the authors of Introductory Electromagnetics and other books on electromagnetics and microwave engineering. They are also father and daughter who share a passion for teaching and research.

          -

          -

          Zoya Popovic received her B.Sc. from the University of Belgrade in 1985, and Ph.D. from Caltech in 1990. She has since been at the University of Colorado at Boulder, where she is a Distinguished Professor and holds the Hudson Moore Jr. Chair in Electrical Engineering. She is a coauthor of Quasi-optical and Active Arrays for Spatial Power Combining (Wiley, 1997), and holds several patents. She received the Eta Kappa Nu professor of the year award from her students. She won the IEEE MTT Microwave Prize, the NSF Presidential Faculty Fellow Award, the URSI Young Scientist Award, the International URSI Issak Koga Gold Medal, and the University of Colorado Margaret Willard Award.

          -

          Branko D. Popovic received his degrees at the University of Belgrade, Serbia, Yugoslavia, where he has been a professor for the past four decades. He was a visiting professor at Virginia Tech (VPI), McGill University, and the University of Colorado, and delivered short courses around the world, including Chengdu University in China and the Telebras Institute in Brasil. He is the author of 3 monographs and 6 textbooks in English and Serbian, and was the recipient of the IEE James Clerk Maxwell Award, the IERE Heinrich Hertz Premium, and the Serbian Nikola Tesla Award, as well as numerous teaching excellence awards from his students.

          -

          How can you get Introductory Electromagnetics by Zoya Popovic and Branko D. Popovic?

          -

          If you want to get Introductory Electromagnetics by Zoya Popovic and Branko D. Popovic, you have several options to choose from. You can either buy it online or offline from various sources.

          -

You can buy it online from Amazon.com for $87.05 (hardcover) or $3.99 (delivery). You can also find it on Google Books for a free preview or purchase. You can also search for other websites that offer the book for download or online access.

          -

          You can buy it offline from your local bookstore or library. You can also borrow it from your friends or colleagues who have a copy of it.

          -

          Conclusion

          -

          In conclusion, introduccionalelectromagnetismopopoviczoya35 is a keyword that you should know if you want to learn more about electromagnetism and its applications. It is a keyword that leads you to Introductory Electromagnetics by Zoya Popovic and Branko D. Popovic, a book that relates basic physical principles to engineering practice with a number of application examples.

          -

          Introductory Electromagnetics by Zoya Popovic and Branko D. Popovic is a book that you can get online or offline from various sources. It is a book that will help you refresh your knowledge of electromagnetics or take your first course in electromagnetics.

          -

          So what are you waiting for? Grab your copy of Introductory Electromagnetics by Zoya Popovic and Branko D. Popovic today!

          -

          -
          -
          \ No newline at end of file diff --git a/spaces/timothepearce/mnist-classification/app.py b/spaces/timothepearce/mnist-classification/app.py deleted file mode 100644 index ee8c018f08d84ec54df893cdce21fee18e0fb95e..0000000000000000000000000000000000000000 --- a/spaces/timothepearce/mnist-classification/app.py +++ /dev/null @@ -1,46 +0,0 @@ -import torch -import gradio as gr -from PIL import Image -from torch import nn -from torchvision import transforms - -classes = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"] - - -class NeuralNetwork(nn.Module): - def __init__(self): - super(NeuralNetwork, self).__init__() - self.flatten = nn.Flatten() - self.linear_relu_stack = nn.Sequential( - nn.Linear(28 * 28, 784), - nn.ReLU(), - nn.Linear(784, 784), - nn.ReLU(), - nn.Linear(784, 10) - ) - - def forward(self, x): - x = self.flatten(x) - logits = self.linear_relu_stack(x) - return logits - - -model = NeuralNetwork() -model.load_state_dict(torch.load("model.pth", map_location=torch.device('cpu'))) -model.eval() - - -def image_classifier(img_input): - img = Image.fromarray(img_input.astype('uint8'), 'RGB') - img = transforms.ToTensor()(img) - - with torch.no_grad(): - pred = model(img)[0] - pred = torch.nn.functional.softmax(pred) - return {classes[i]: float(pred[i]) for i in range(10)} - - -gr.Interface(fn=image_classifier, - inputs=gr.Image(shape=(28, 28)), - outputs=gr.Label(num_top_classes=4), - examples=["mnist_0.png", "mnist_2.png", "mnist_3.png"]).launch() diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/IBM SPSS Statistics 24 Crack Keygen Full Version Download.md b/spaces/tioseFevbu/cartoon-converter/scripts/IBM SPSS Statistics 24 Crack Keygen Full Version Download.md deleted file mode 100644 index 57ddbee8d954cfaf44a64d1df07800a73621e045..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/IBM SPSS Statistics 24 Crack Keygen Full Version Download.md +++ /dev/null @@ -1,115 +0,0 @@ -
          -

          IBM SPSS Statistics 24 Crack Keygen Full Version Download

          -

If you are looking for a powerful statistical software platform that can help you solve business and research problems, you might have heard of IBM SPSS Statistics 24. This software offers a user-friendly interface and a robust set of features that let you quickly extract actionable insights from your data. However, you might also be wondering how to get this software for free or at a lower cost, and you might have come across some websites that offer crack keygen software for IBM SPSS Statistics 24. In this article, we will explain what IBM SPSS Statistics 24 is, what crack keygen software is, and why you should avoid using it. We will also show you how to get IBM SPSS Statistics 24 legally and safely, and what the alternatives and options are for getting this software for free or at a lower cost.

          -

          What is IBM SPSS Statistics 24?

          -

          IBM SPSS Statistics 24 is the latest version of the world’s leading statistical software used to solve business and research problems by means of ad-hoc analysis, hypothesis testing, and predictive analytics. Organizations use IBM SPSS Statistics to understand data, analyze trends, forecast and plan to validate assumptions and drive accurate conclusions.

          -

          IBM SPSS Statistics 24 Crack Keygen Full Version Download


          DOWNLOAD >>> https://urlcod.com/2uHxDi



          -

          Features and benefits of IBM SPSS Statistics 24

          -

          Some of the features and benefits of IBM SPSS Statistics 24 are:

          -
            -
          • Easy to use: You can prepare and analyze data through an intuitive user interface without having to write code using drag-and-drop.
          • -
• Integrated with open source: You can enhance SPSS syntax with R and Python using a library of extensions or by building your own (see the short Python sketch after this list).
          • -
          • Comprehensive: You can run descriptive statistics and regression analyses, view patterns of missing data, and summarize variable distributions with an integrated interface.
          • -
          • Advanced: You can perform complex analyses such as Bayesian procedures, multilayer perceptron network, accelerated failure time survival models, etc.
          • -
          • Flexible: You can choose from subscription or traditional licenses, with multiple options for capabilities based on your needs.
          • -
          -

          How to download and install IBM SPSS Statistics 24

          -

          If you want to download and install IBM SPSS Statistics 24, you need to follow these steps:

          -
            -
          1. Go to the IBM Passport Advantage® Web Site and sign in or register.
          2. -
          3. Click Download finder under Find downloads & media.
          4. -
          5. Select the download you want under Download finder, select a language and one or more platforms, select the options you want, and click Continue.
          6. -
          7. Select the download you want under Review “Current version” downloads, expand it and select the items you want. If applicable, select the optional downloads you want.
          8. -
          9. Update the Download method or the Download location if you want, click I agree, and click Download now.
          10. -
          11. Unpack each of the downloaded parts into a single temporary directory on your system.
          12. -
          13. Follow the installation instructions provided in the download to install the product.
          14. -
          -

          What is crack keygen software?

          -

Crack keygen software is a program that generates serial keys or activation codes for other software that requires them. A serial key or activation code is a unique combination of numbers and letters that verifies that a user has purchased a legitimate copy of the software. Crack keygen software is usually created by hackers or crackers who want to bypass the security measures of the original software and distribute it for free or for malicious purposes.

          -

          The risks and disadvantages of using crack keygen software

          -

Using crack keygen software for IBM SPSS Statistics 24 or any other software is not only illegal, but also risky and disadvantageous. Some of the risks and disadvantages are:

          -
            -
          • Viruses and malware: Crack keygen software often contains viruses, malware, spyware, or ransomware that can infect your computer, steal your personal information, damage your files, or lock your system until you pay a ransom.
          • -
          • Poor performance and compatibility: Crack keygen software often does not work properly, crashes frequently, or causes errors and conflicts with other programs. It may also not be compatible with the latest updates, patches, or features of the original software.
          • -
          • No support or warranty: Crack keygen software does not come with any support or warranty from the original software developer. If you encounter any problems or issues with the software, you will not be able to get any help or assistance.
          • -
          • Legal consequences: Crack keygen software violates the intellectual property rights of the original software developer. If you are caught using or distributing crack keygen software, you may face legal actions such as fines, lawsuits, or even criminal charges.
          • -
          -

          The legal and ethical issues of using crack keygen software

          -

Besides the risks and disadvantages, using crack keygen software for IBM SPSS Statistics 24 or any other software also raises some legal and ethical issues. Some of these legal and ethical issues are:

          -
            -
          • Software piracy: Software piracy is the unauthorized copying, distribution, or use of software without paying for it. Software piracy is a form of theft that deprives the original software developer of their rightful income and recognition.
          • -
          • Unfair competition: Unfair competition is the practice of gaining an unfair advantage over others by using illegal or unethical means. Using crack keygen software for IBM SPSS Statistics 24 may give you an unfair edge over other users or competitors who have paid for the software legitimately.
          • -
          • Academic dishonesty: Academic dishonesty is the act of cheating, plagiarizing, or falsifying academic work. Using crack keygen software for IBM SPSS Statistics 24 may compromise the integrity and quality of your research or analysis, and may result in academic penalties such as failing grades, suspension, or expulsion.
          • -
          -

          How to get IBM SPSS Statistics 24 legally and safely

          -

          If you want to get IBM SPSS Statistics 24 legally and safely, you have two main options: buying it or getting it for free or at a lower cost. Each option has its own advantages and disadvantages that you need to consider before making a decision.

          -

          -

          The advantages and disadvantages of buying IBM SPSS Statistics 24

          -

The most straightforward way to get IBM SPSS Statistics 24 legally and safely is to buy it from the official website or an authorized reseller. The advantages of buying IBM SPSS Statistics 24 are:

          -
            -
          • You get access to the full features and capabilities of the software without any limitations or restrictions.
          • -
          • You get regular updates, patches, and enhancements that improve the performance and functionality of the software.
          • -
          • You get technical support and customer service from the original software developer in case you encounter any problems or issues with the software.
          • -
          • You respect the intellectual property rights of the original software developer and support their innovation and development.
          • -
          -

The disadvantages of buying IBM SPSS Statistics 24 are:

          -
            -
          • You have to pay a relatively high price for the software license, which may vary depending on your location, currency, and type of license (subscription or traditional).
          • -
          • You have to comply with the terms and conditions of the license agreement, which may limit your usage rights and obligations regarding the software.
          • -
          • You have to ensure that your system meets the minimum requirements for running the software smoothly and efficiently.
          • -
          -

          The alternatives and options for getting IBM SPSS Statistics 24 for free or at a lower cost

If you want to get IBM SPSS Statistics 24 for free or at a lower cost, you have some alternatives and options that are legal and safe. However, you also need to be aware of the trade-offs and limitations that come with them. Some of these alternatives and options are:

          -
            -
          • Free trial: You can download and use IBM SPSS Statistics 24 for free for a limited period of time (usually 14 days) from the official website. This is a good way to test the software and see if it meets your needs and expectations. However, after the trial period expires, you will need to buy the software license or uninstall it from your system.
          • -
          • Student edition: If you are a student or an educator, you can get IBM SPSS Statistics 24 at a discounted price from the official website or an authorized reseller. This is a great way to learn and teach statistics and data analysis using a professional software. However, you will need to provide proof of your academic status and agree to use the software only for non-commercial purposes.
          • -
          • Open source alternatives: If you are looking for a free and open source software that can perform similar functions as IBM SPSS Statistics 24, you can try some of the following options:
              -
            • R: R is a programming language and environment for statistical computing and graphics. It offers a wide range of statistical and graphical techniques, and is highly extensible.
            • -
            • Python: Python is a general-purpose programming language that has many libraries and modules for data analysis, such as pandas, numpy, scipy, matplotlib, etc.
            • -
            • PSPP: PSPP is a free software application for analysis of sampled data. It is designed as a replacement for SPSS, and has a similar user interface and syntax.
            • -
            • JASP: JASP is a free software for statistical analysis with an intuitive graphical user interface. It supports classical and Bayesian inference, and can import data from SPSS, Excel, CSV, etc.
            • -
-However, you will need to learn how to use these software tools, which may have different features, capabilities, and interfaces than IBM SPSS Statistics 24. A minimal Python sketch is shown after this list.
          • -
          -
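To give a rough idea of what the Python route involves, here is a minimal sketch of two everyday SPSS-style tasks, descriptive statistics and a simple linear regression, using pandas and statsmodels. The file name survey.csv and the columns age and income are placeholders for illustration only; substitute your own data and variables.

```python
# Minimal sketch of SPSS-style analysis in Python (pandas + statsmodels).
# Assumes a hypothetical CSV file "survey.csv" with numeric columns "age" and "income".
import pandas as pd
import statsmodels.api as sm

df = pd.read_csv("survey.csv")

# Descriptive statistics, roughly comparable to SPSS Descriptives/Frequencies
print(df[["age", "income"]].describe())

# Ordinary least squares regression of income on age,
# roughly comparable to SPSS Linear Regression
X = sm.add_constant(df["age"])  # add an intercept term
model = sm.OLS(df["income"], X, missing="drop").fit()
print(model.summary())  # coefficients, R-squared, p-values
```

PSPP and JASP offer similar analyses through a graphical interface, so no code is needed for those tools.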

          Conclusion

          -

          In conclusion, IBM SPSS Statistics 24 is a powerful statistical software platform that can help you solve business and research problems by means of ad-hoc analysis, hypothesis testing, and predictive analytics. However, using crack keygen software for IBM SPSS Statistics 24 is not only illegal, but also risky and disadvantageous. It exposes you to viruses and malware, poor performance and compatibility, no support or warranty, and legal consequences. It also violates the intellectual property rights of the original software developer, creates unfair competition for other users or competitors, and compromises academic honesty for researchers or students. Therefore, you should avoid using crack keygen software for IBM SPSS Statistics 24 or any other software. Instead, you should get IBM SPSS Statistics 24 legally and safely by buying it from the official website or an authorized reseller, or by using some of the alternatives and options that are available for free or at a lower cost.

          -

          Summary of the main points

          -

          Here is a summary of the main points of this article:

          -
            -
          • IBM SPSS Statistics 24 is the latest version of the world’s leading statistical software used to solve business and research problems by means of ad-hoc analysis, hypothesis testing, and predictive analytics.
          • -
          • A crack keygen software is a software that generates serial keys or activation codes for another software that requires them. A crack keygen software is usually created by hackers or crackers who want to bypass the security measures of the original software and distribute it for free or for malicious purposes.
          • -
          • Using crack keygen software for IBM SPSS Statistics 24 or any other software is not only illegal, but also risky and disadvantageous. It exposes you to viruses and malware, poor performance and compatibility, no support or warranty, and legal consequences. It also violates the intellectual property rights of the original software developer, creates unfair competition for other users or competitors, and compromises academic honesty for researchers or students.
          • -
          • You can get IBM SPSS Statistics 24 legally and safely by buying it from the official website or an authorized reseller, or by using some of the alternatives and options that are available for free or at a lower cost. However, each option has its own advantages and disadvantages that you need to consider before making a decision.
          • -
          -

          Recommendations and tips for the readers

          -

Here are some recommendations and tips for readers who want to get IBM SPSS Statistics 24 legally and safely:

          -
            -
          • Compare the prices and features of different types of licenses (subscription or traditional) and choose the one that suits your budget and needs.
          • -
          • Check the system requirements and compatibility of IBM SPSS Statistics 24 before buying or downloading it, and make sure your system meets or exceeds them.
          • -
          • Download and install IBM SPSS Statistics 24 only from the official website or an authorized reseller, and avoid any suspicious or unknown sources that may offer crack keygen software.
          • -
          • Keep your IBM SPSS Statistics 24 software updated and secure, and follow the license agreement terms and conditions.
          • -
          • If you are a student or an educator, take advantage of the discounted price for IBM SPSS Statistics 24, and use it only for non-commercial purposes.
          • -
          • If you want to try some of the open source alternatives for IBM SPSS Statistics 24, such as R, Python, PSPP, or JASP, learn how to use them properly and effectively, and compare their features and capabilities with IBM SPSS Statistics 24.
          • -
          -

          FAQs

          -

          Here are some frequently asked questions (FAQs) about IBM SPSS Statistics 24 and crack keygen software:

          -
            -
          1. What is the difference between IBM SPSS Statistics 24 subscription and traditional licenses?
            -A subscription license allows you to use IBM SPSS Statistics 24 for a fixed period of time (monthly or annually) and pay as you go. A traditional license allows you to use IBM SPSS Statistics 24 indefinitely and pay a one-time fee. A subscription license offers more flexibility and affordability, while a traditional license offers more stability and security.
          2. -
          3. How can I extend or renew my IBM SPSS Statistics 24 free trial?
            -You cannot extend or renew your IBM SPSS Statistics 24 free trial once it expires. You will need to buy a subscription or a traditional license to continue using the software.
          4. -
          5. How can I uninstall IBM SPSS Statistics 24 from my system?
            -You can uninstall IBM SPSS Statistics 24 from your system by following these steps:
              -
            1. Close any open instances of IBM SPSS Statistics 24.
            2. -
            3. Open the Control Panel on your system.
            4. -
            5. Select Programs and Features or Add or Remove Programs.
            6. -
            7. Select IBM SPSS Statistics 24 from the list of programs and click Uninstall or Remove.
            8. -
            9. Follow the instructions on the screen to complete the uninstallation process.
            10. -
          6. -
          7. How can I contact IBM SPSS Statistics 24 support or customer service?
            -You can contact IBM SPSS Statistics 24 support or customer service by visiting the IBM Support Portal, where you can find resources such as documentation, forums, downloads, tutorials, etc. You can also submit a ticket, chat with an agent, or call a phone number depending on your location and issue.
          8. -
          9. How can I report a crack keygen software website or source?
            -You can report a crack keygen software website or source by contacting the original software developer or the relevant authorities in your country. You can also use online tools such as Google Safe Browsing or Web of Trust to report malicious or unsafe websites.
          10. -
- References: [IBM SPSS Statistics](https://www.ibm.com/products/spss-statistics), [IBM SPSS Statistics Features](https://www.ibm.com/products/spss-statistics/details), [IBM Knowledge Center - Uninstalling](https://www.ibm.com/docs/en/spss-statistics/24.0.0?topic=installation-uninstalling), [Google Safe Browsing](https://safebrowsing.google.com/), [Web of Trust](https://www.mywot.com/)

          -
          -
          \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Kashiwagi Yuki Solo Concert Download.md b/spaces/tioseFevbu/cartoon-converter/scripts/Kashiwagi Yuki Solo Concert Download.md deleted file mode 100644 index 222acdb3e0f48f68221e9026f825f5bf5b55959e..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Kashiwagi Yuki Solo Concert Download.md +++ /dev/null @@ -1,16 +0,0 @@ -
          -

          Kashiwagi Yuki: The Idol Who Shines on Stage

          -

          Kashiwagi Yuki, also known as Yukirin, is one of the most popular and talented members of AKB48, a Japanese idol group. She has been with the group since 2007 and has participated in many singles, albums, and concerts. She is also a solo artist who has released four solo singles and two solo albums.

          -




          -

          One of her most memorable solo performances was at the Laguna Music Festival 2018, where she held a solo concert for her fans. She sang 18 songs, including her solo hits, AKB48 songs, and covers of other artists. She also interacted with the audience and showed her charming personality. The concert was a success and received positive feedback from the fans and critics.

          -

          If you missed the concert or want to relive the experience, you can download the video of the concert from various online platforms. You can also watch some clips of the concert on YouTube[^1^] [^2^]. You will be amazed by Yukirin's vocal skills, stage presence, and charisma. She is truly a star who deserves your attention and support.

          -

          Kashiwagi Yuki is not only a great singer but also a versatile performer who can act, model, and host. She has appeared in several dramas, movies, variety shows, and magazines. She is also active on social media and often updates her fans with her activities and thoughts. You can follow her on Twitter (@Yukiriiiin__K) and Instagram (@yukikashiwagi_official).

          -

          -

          Kashiwagi Yuki is one of the most influential idols in Japan and has a loyal fanbase around the world. She is an inspiration to many aspiring singers and performers who want to pursue their dreams. She is also a role model for many young girls who admire her beauty, talent, and personality. She is a legend who will continue to shine on stage and beyond.

          - -

          However, Kashiwagi Yuki's career has not been without challenges and difficulties. In June 2021, she announced that she would be taking a hiatus to undergo surgery for a spinal cord condition. She revealed that she had been suffering from numbness and pain in her limbs for a long time and that the doctors found a tumor in her spinal cord. She expressed her determination to overcome the surgery and return to her fans as soon as possible.

          -

          Fortunately, the surgery was successful and Kashiwagi Yuki was able to recover well. She was discharged from the hospital in July 2021 and started her rehabilitation process. She also resumed her activities gradually, such as updating her blog, appearing on radio shows, and participating in online events. She thanked her fans for their support and prayers during her difficult time.

          -

          In October 2021, Kashiwagi Yuki made her comeback on stage at the AKB48 Theater, where she performed with her fellow Team B members. She also held a solo concert at the same venue on November 6, 2021, to celebrate her 15th anniversary of debut. She sang 23 songs, including some of her new songs that she wrote during her hiatus. She also announced that she would be releasing a new solo single titled "Kimi no Koto ga Suki" on December 8, 2021.

          -

          Kashiwagi Yuki has shown her resilience and passion for music and entertainment despite the obstacles and hardships she faced. She has also proven her talent and popularity as a solo artist and a member of AKB48. She is looking forward to creating more memories with her fans and delivering more amazing performances in the future.

          -
          -
          \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py deleted file mode 100644 index 41a8fd174cbc556d495aca1d58af8e2197ace913..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/urllib3/contrib/ntlmpool.py +++ /dev/null @@ -1,130 +0,0 @@ -""" -NTLM authenticating pool, contributed by erikcederstran - -Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10 -""" -from __future__ import absolute_import - -import warnings -from logging import getLogger - -from ntlm import ntlm - -from .. import HTTPSConnectionPool -from ..packages.six.moves.http_client import HTTPSConnection - -warnings.warn( - "The 'urllib3.contrib.ntlmpool' module is deprecated and will be removed " - "in urllib3 v2.0 release, urllib3 is not able to support it properly due " - "to reasons listed in issue: https://github.com/urllib3/urllib3/issues/2282. " - "If you are a user of this module please comment in the mentioned issue.", - DeprecationWarning, -) - -log = getLogger(__name__) - - -class NTLMConnectionPool(HTTPSConnectionPool): - """ - Implements an NTLM authentication version of an urllib3 connection pool - """ - - scheme = "https" - - def __init__(self, user, pw, authurl, *args, **kwargs): - """ - authurl is a random URL on the server that is protected by NTLM. - user is the Windows user, probably in the DOMAIN\\username format. - pw is the password for the user. - """ - super(NTLMConnectionPool, self).__init__(*args, **kwargs) - self.authurl = authurl - self.rawuser = user - user_parts = user.split("\\", 1) - self.domain = user_parts[0].upper() - self.user = user_parts[1] - self.pw = pw - - def _new_conn(self): - # Performs the NTLM handshake that secures the connection. The socket - # must be kept open while requests are performed. - self.num_connections += 1 - log.debug( - "Starting NTLM HTTPS connection no. 
%d: https://%s%s", - self.num_connections, - self.host, - self.authurl, - ) - - headers = {"Connection": "Keep-Alive"} - req_header = "Authorization" - resp_header = "www-authenticate" - - conn = HTTPSConnection(host=self.host, port=self.port) - - # Send negotiation message - headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE( - self.rawuser - ) - log.debug("Request headers: %s", headers) - conn.request("GET", self.authurl, None, headers) - res = conn.getresponse() - reshdr = dict(res.getheaders()) - log.debug("Response status: %s %s", res.status, res.reason) - log.debug("Response headers: %s", reshdr) - log.debug("Response data: %s [...]", res.read(100)) - - # Remove the reference to the socket, so that it can not be closed by - # the response object (we want to keep the socket open) - res.fp = None - - # Server should respond with a challenge message - auth_header_values = reshdr[resp_header].split(", ") - auth_header_value = None - for s in auth_header_values: - if s[:5] == "NTLM ": - auth_header_value = s[5:] - if auth_header_value is None: - raise Exception( - "Unexpected %s response header: %s" % (resp_header, reshdr[resp_header]) - ) - - # Send authentication message - ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE( - auth_header_value - ) - auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE( - ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags - ) - headers[req_header] = "NTLM %s" % auth_msg - log.debug("Request headers: %s", headers) - conn.request("GET", self.authurl, None, headers) - res = conn.getresponse() - log.debug("Response status: %s %s", res.status, res.reason) - log.debug("Response headers: %s", dict(res.getheaders())) - log.debug("Response data: %s [...]", res.read()[:100]) - if res.status != 200: - if res.status == 401: - raise Exception("Server rejected request: wrong username or password") - raise Exception("Wrong server response: %s %s" % (res.status, res.reason)) - - res.fp = None - log.debug("Connection established") - return conn - - def urlopen( - self, - method, - url, - body=None, - headers=None, - retries=3, - redirect=True, - assert_same_host=True, - ): - if headers is None: - headers = {} - headers["Connection"] = "Keep-Alive" - return super(NTLMConnectionPool, self).urlopen( - method, url, body, headers, retries, redirect, assert_same_host - ) diff --git a/spaces/tomofi/MMOCR/mmocr/datasets/ocr_dataset.py b/spaces/tomofi/MMOCR/mmocr/datasets/ocr_dataset.py deleted file mode 100644 index b24d15d6046d2cdd0c911fe1ecc888933418cd05..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/datasets/ocr_dataset.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from mmdet.datasets.builder import DATASETS - -from mmocr.core.evaluation.ocr_metric import eval_ocr_metric -from mmocr.datasets.base_dataset import BaseDataset - - -@DATASETS.register_module() -class OCRDataset(BaseDataset): - - def pre_pipeline(self, results): - results['img_prefix'] = self.img_prefix - results['text'] = results['img_info']['text'] - - def evaluate(self, results, metric='acc', logger=None, **kwargs): - """Evaluate the dataset. - - Args: - results (list): Testing results of the dataset. - metric (str | list[str]): Metrics to be evaluated. - logger (logging.Logger | str | None): Logger used for printing - related information during evaluation. Default: None. 
- Returns: - dict[str: float] - """ - gt_texts = [] - pred_texts = [] - for i in range(len(self)): - item_info = self.data_infos[i] - text = item_info['text'] - gt_texts.append(text) - pred_texts.append(results[i]['text']) - - eval_results = eval_ocr_metric(pred_texts, gt_texts) - - return eval_results diff --git a/spaces/tomofi/MMOCR/tools/deployment/pytorch2onnx.py b/spaces/tomofi/MMOCR/tools/deployment/pytorch2onnx.py deleted file mode 100644 index fce63e907226728fb1f5db231742ede394835ca8..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/tools/deployment/pytorch2onnx.py +++ /dev/null @@ -1,368 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings -from argparse import ArgumentParser -from functools import partial - -import cv2 -import numpy as np -import torch -from mmcv.onnx import register_extra_symbolics -from mmcv.parallel import collate -from mmdet.datasets import replace_ImageToTensor -from mmdet.datasets.pipelines import Compose -from torch import nn - -from mmocr.apis import init_detector -from mmocr.core.deployment import ONNXRuntimeDetector, ONNXRuntimeRecognizer -from mmocr.datasets.pipelines.crop import crop_img # noqa: F401 -from mmocr.utils import is_2dlist - - -def _convert_batchnorm(module): - module_output = module - if isinstance(module, torch.nn.SyncBatchNorm): - module_output = torch.nn.BatchNorm2d(module.num_features, module.eps, - module.momentum, module.affine, - module.track_running_stats) - if module.affine: - module_output.weight.data = module.weight.data.clone().detach() - module_output.bias.data = module.bias.data.clone().detach() - # keep requires_grad unchanged - module_output.weight.requires_grad = module.weight.requires_grad - module_output.bias.requires_grad = module.bias.requires_grad - module_output.running_mean = module.running_mean - module_output.running_var = module.running_var - module_output.num_batches_tracked = module.num_batches_tracked - for name, child in module.named_children(): - module_output.add_module(name, _convert_batchnorm(child)) - del module - return module_output - - -def _prepare_data(cfg, imgs): - """Inference image(s) with the detector. - - Args: - model (nn.Module): The loaded detector. - imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]): - Either image files or loaded images. - Returns: - result (dict): Predicted results. 
- """ - if isinstance(imgs, (list, tuple)): - if not isinstance(imgs[0], (np.ndarray, str)): - raise AssertionError('imgs must be strings or numpy arrays') - - elif isinstance(imgs, (np.ndarray, str)): - imgs = [imgs] - else: - raise AssertionError('imgs must be strings or numpy arrays') - - is_ndarray = isinstance(imgs[0], np.ndarray) - - if is_ndarray: - cfg = cfg.copy() - # set loading pipeline type - cfg.data.test.pipeline[0].type = 'LoadImageFromNdarray' - - cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) - test_pipeline = Compose(cfg.data.test.pipeline) - - data = [] - for img in imgs: - # prepare data - if is_ndarray: - # directly add img - datum = dict(img=img) - else: - # add information into dict - datum = dict(img_info=dict(filename=img), img_prefix=None) - - # build the data pipeline - datum = test_pipeline(datum) - # get tensor from list to stack for batch mode (text detection) - data.append(datum) - - if isinstance(data[0]['img'], list) and len(data) > 1: - raise Exception('aug test does not support ' - f'inference with batch size ' - f'{len(data)}') - - data = collate(data, samples_per_gpu=len(imgs)) - - # process img_metas - if isinstance(data['img_metas'], list): - data['img_metas'] = [ - img_metas.data[0] for img_metas in data['img_metas'] - ] - else: - data['img_metas'] = data['img_metas'].data - - if isinstance(data['img'], list): - data['img'] = [img.data for img in data['img']] - if isinstance(data['img'][0], list): - data['img'] = [img[0] for img in data['img']] - else: - data['img'] = data['img'].data - return data - - -def pytorch2onnx(model: nn.Module, - model_type: str, - img_path: str, - verbose: bool = False, - show: bool = False, - opset_version: int = 11, - output_file: str = 'tmp.onnx', - verify: bool = False, - dynamic_export: bool = False, - device_id: int = 0): - """Export PyTorch model to ONNX model and verify the outputs are same - between PyTorch and ONNX. - - Args: - model (nn.Module): PyTorch model we want to export. - model_type (str): Model type, detection or recognition model. - img_path (str): We need to use this input to execute the model. - opset_version (int): The onnx op version. Default: 11. - verbose (bool): Whether print the computation graph. Default: False. - show (bool): Whether visialize final results. Default: False. - output_file (string): The path to where we store the output ONNX model. - Default: `tmp.onnx`. - verify (bool): Whether compare the outputs between PyTorch and ONNX. - Default: False. - dynamic_export (bool): Whether apply dynamic export. - Default: False. - device_id (id): Device id to place model and data. 
- Default: 0 - """ - device = torch.device(type='cuda', index=device_id) - model.to(device).eval() - _convert_batchnorm(model) - - # prepare inputs - mm_inputs = _prepare_data(cfg=model.cfg, imgs=img_path) - imgs = mm_inputs.pop('img') - img_metas = mm_inputs.pop('img_metas') - - if isinstance(imgs, list): - imgs = imgs[0] - - img_list = [img[None, :].to(device) for img in imgs] - - origin_forward = model.forward - if (model_type == 'det'): - model.forward = partial( - model.simple_test, img_metas=img_metas, rescale=True) - else: - model.forward = partial( - model.forward, - img_metas=img_metas, - return_loss=False, - rescale=True) - - # pytorch has some bug in pytorch1.3, we have to fix it - # by replacing these existing op - register_extra_symbolics(opset_version) - dynamic_axes = None - if dynamic_export and model_type == 'det': - dynamic_axes = { - 'input': { - 0: 'batch', - 2: 'height', - 3: 'width' - }, - 'output': { - 0: 'batch', - 2: 'height', - 3: 'width' - } - } - elif dynamic_export and model_type == 'recog': - dynamic_axes = { - 'input': { - 0: 'batch', - 3: 'width' - }, - 'output': { - 0: 'batch', - 1: 'seq_len', - 2: 'num_classes' - } - } - with torch.no_grad(): - torch.onnx.export( - model, (img_list[0], ), - output_file, - input_names=['input'], - output_names=['output'], - export_params=True, - keep_initializers_as_inputs=False, - verbose=verbose, - opset_version=opset_version, - dynamic_axes=dynamic_axes) - print(f'Successfully exported ONNX model: {output_file}') - if verify: - # check by onnx - import onnx - onnx_model = onnx.load(output_file) - onnx.checker.check_model(onnx_model) - - scale_factor = (0.5, 0.5) if model_type == 'det' else (1, 0.5) - if dynamic_export: - # scale image for dynamic shape test - img_list = [ - nn.functional.interpolate(_, scale_factor=scale_factor) - for _ in img_list - ] - if model_type == 'det': - img_metas[0][0][ - 'scale_factor'] = img_metas[0][0]['scale_factor'] * ( - scale_factor * 2) - - # check the numerical value - # get pytorch output - with torch.no_grad(): - model.forward = origin_forward - pytorch_out = model.simple_test( - img_list[0], img_metas[0], rescale=True) - - # get onnx output - if model_type == 'det': - onnx_model = ONNXRuntimeDetector(output_file, model.cfg, device_id) - else: - onnx_model = ONNXRuntimeRecognizer(output_file, model.cfg, - device_id) - onnx_out = onnx_model.simple_test( - img_list[0], img_metas[0], rescale=True) - - # compare results - same_diff = 'same' - if model_type == 'recog': - for onnx_result, pytorch_result in zip(onnx_out, pytorch_out): - if onnx_result['text'] != pytorch_result[ - 'text'] or not np.allclose( - np.array(onnx_result['score']), - np.array(pytorch_result['score']), - rtol=1e-4, - atol=1e-4): - same_diff = 'different' - break - else: - for onnx_result, pytorch_result in zip( - onnx_out[0]['boundary_result'], - pytorch_out[0]['boundary_result']): - if not np.allclose( - np.array(onnx_result), - np.array(pytorch_result), - rtol=1e-4, - atol=1e-4): - same_diff = 'different' - break - print('The outputs are {} between PyTorch and ONNX'.format(same_diff)) - - if show: - onnx_img = onnx_model.show_result( - img_path, onnx_out[0], out_file='onnx.jpg', show=False) - pytorch_img = model.show_result( - img_path, pytorch_out[0], out_file='pytorch.jpg', show=False) - if onnx_img is None: - onnx_img = cv2.imread(img_path) - if pytorch_img is None: - pytorch_img = cv2.imread(img_path) - - cv2.imshow('PyTorch', pytorch_img) - cv2.imshow('ONNXRuntime', onnx_img) - cv2.waitKey() - return - - -def 
main(): - parser = ArgumentParser( - description='Convert MMOCR models from pytorch to ONNX') - parser.add_argument('model_config', type=str, help='Config file.') - parser.add_argument( - 'model_ckpt', type=str, help='Checkpint file (local or url).') - parser.add_argument( - 'model_type', - type=str, - help='Detection or recognition model to deploy.', - choices=['recog', 'det']) - parser.add_argument('image_path', type=str, help='Input Image file.') - parser.add_argument( - '--output-file', - type=str, - help='Output file name of the onnx model.', - default='tmp.onnx') - parser.add_argument( - '--device-id', default=0, help='Device used for inference.') - parser.add_argument( - '--opset-version', - type=int, - help='ONNX opset version, default to 11.', - default=11) - parser.add_argument( - '--verify', - action='store_true', - help='Whether verify the outputs of onnx and pytorch are same.', - default=False) - parser.add_argument( - '--verbose', - action='store_true', - help='Whether print the computation graph.', - default=False) - parser.add_argument( - '--show', - action='store_true', - help='Whether visualize final output.', - default=False) - parser.add_argument( - '--dynamic-export', - action='store_true', - help='Whether dynamically export onnx model.', - default=False) - args = parser.parse_args() - - # Following strings of text style are from colorama package - bright_style, reset_style = '\x1b[1m', '\x1b[0m' - red_text, blue_text = '\x1b[31m', '\x1b[34m' - white_background = '\x1b[107m' - - msg = white_background + bright_style + red_text - msg += 'DeprecationWarning: This tool will be deprecated in future. ' - msg += blue_text + 'Welcome to use the unified model deployment toolbox ' - msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy' - msg += reset_style - warnings.warn(msg) - - device = torch.device(type='cuda', index=args.device_id) - - # build model - model = init_detector(args.model_config, args.model_ckpt, device=device) - if hasattr(model, 'module'): - model = model.module - if model.cfg.data.test.get('pipeline', None) is None: - if is_2dlist(model.cfg.data.test.datasets): - model.cfg.data.test.pipeline = \ - model.cfg.data.test.datasets[0][0].pipeline - else: - model.cfg.data.test.pipeline = \ - model.cfg.data.test['datasets'][0].pipeline - if is_2dlist(model.cfg.data.test.pipeline): - model.cfg.data.test.pipeline = model.cfg.data.test.pipeline[0] - - pytorch2onnx( - model, - model_type=args.model_type, - output_file=args.output_file, - img_path=args.image_path, - opset_version=args.opset_version, - verify=args.verify, - verbose=args.verbose, - show=args.show, - device_id=args.device_id, - dynamic_export=args.dynamic_export) - - -if __name__ == '__main__': - main() diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/utils/env.py b/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/utils/env.py deleted file mode 100644 index 1c7db32e41ec266ead9734f90d0173b4feff61ef..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/utils/env.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -import os - -from maskrcnn_benchmark.utils.imports import import_file - - -def setup_environment(): - """Perform environment setup work. The default setup is a no-op, but this - function allows the user to specify a Python source file that performs - custom setup work that may be necessary to their computing environment. 
- """ - custom_module_path = os.environ.get("TORCH_DETECTRON_ENV_MODULE") - if custom_module_path: - setup_custom_environment(custom_module_path) - else: - # The default setup is a no-op - pass - - -def setup_custom_environment(custom_module_path): - """Load custom environment setup from a Python source file and run the setup - function. - """ - module = import_file("maskrcnn_benchmark.utils.env.custom_module", custom_module_path) - assert hasattr(module, "setup_environment") and callable( - module.setup_environment - ), ( - "Custom environment module defined in {} does not have the " - "required callable attribute 'setup_environment'." - ).format( - custom_module_path - ) - module.setup_environment() - - -# Force environment setup when this module is imported -setup_environment() diff --git a/spaces/ttt246/brain/Brain/src/rising_plugin/rails_validate.py b/spaces/ttt246/brain/Brain/src/rising_plugin/rails_validate.py deleted file mode 100644 index 102f802eab9e2044be52536129cddac0d67538af..0000000000000000000000000000000000000000 --- a/spaces/ttt246/brain/Brain/src/rising_plugin/rails_validate.py +++ /dev/null @@ -1,15 +0,0 @@ -"""validate rails result: -checking with program whether is it message or rails_off_topic""" -import json - -from Brain.src.common.program_type import ProgramType - - -def validate_rails(data: str) -> bool: - try: - json_obj = json.loads(data["content"]) - if json_obj["program"] == ProgramType.RAILS_OFF_TOPIC: - return False - return True - except Exception as ex: - return False diff --git a/spaces/tweakdoor/stabilityai-stable-diffusion-2-1/app.py b/spaces/tweakdoor/stabilityai-stable-diffusion-2-1/app.py deleted file mode 100644 index 0160420876923d89f2ab5fccb9f4d13725e29972..0000000000000000000000000000000000000000 --- a/spaces/tweakdoor/stabilityai-stable-diffusion-2-1/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stabilityai/stable-diffusion-2-1").launch() \ No newline at end of file diff --git a/spaces/ucalyptus/PTI/models/StyleCLIP/global_directions/Inference.py b/spaces/ucalyptus/PTI/models/StyleCLIP/global_directions/Inference.py deleted file mode 100644 index a292787c88a370b15b4f0d633ac27bb5bed2b510..0000000000000000000000000000000000000000 --- a/spaces/ucalyptus/PTI/models/StyleCLIP/global_directions/Inference.py +++ /dev/null @@ -1,106 +0,0 @@ - - -from manipulate import Manipulator -import tensorflow as tf -import numpy as np -import torch -import clip -from MapTS import GetBoundary,GetDt - -class StyleCLIP(): - - def __init__(self,dataset_name='ffhq'): - print('load clip') - device = "cuda" if torch.cuda.is_available() else "cpu" - self.model, preprocess = clip.load("ViT-B/32", device=device) - self.LoadData(dataset_name) - - def LoadData(self, dataset_name): - tf.keras.backend.clear_session() - M=Manipulator(dataset_name=dataset_name) - np.set_printoptions(suppress=True) - fs3=np.load('./npy/'+dataset_name+'/fs3.npy') - - self.M=M - self.fs3=fs3 - - w_plus=np.load('./data/'+dataset_name+'/w_plus.npy') - self.M.dlatents=M.W2S(w_plus) - - if dataset_name=='ffhq': - self.c_threshold=20 - else: - self.c_threshold=100 - self.SetInitP() - - def SetInitP(self): - self.M.alpha=[3] - self.M.num_images=1 - - self.target='' - self.neutral='' - self.GetDt2() - img_index=0 - self.M.dlatent_tmp=[tmp[img_index:(img_index+1)] for tmp in self.M.dlatents] - - - def GetDt2(self): - classnames=[self.target,self.neutral] - dt=GetDt(classnames,self.model) - - self.dt=dt - num_cs=[] - betas=np.arange(0.1,0.3,0.01) - for i in 
range(len(betas)): - boundary_tmp2,num_c=GetBoundary(self.fs3,self.dt,self.M,threshold=betas[i]) - print(betas[i]) - num_cs.append(num_c) - - num_cs=np.array(num_cs) - select=num_cs>self.c_threshold - - if sum(select)==0: - self.beta=0.1 - else: - self.beta=betas[select][-1] - - - def GetCode(self): - boundary_tmp2,num_c=GetBoundary(self.fs3,self.dt,self.M,threshold=self.beta) - codes=self.M.MSCode(self.M.dlatent_tmp,boundary_tmp2) - return codes - - def GetImg(self): - - codes=self.GetCode() - out=self.M.GenerateImg(codes) - img=out[0,0] - return img - - - - -#%% -if __name__ == "__main__": - style_clip=StyleCLIP() - self=style_clip - - - - - - - - - - - - - - - - - - - - diff --git a/spaces/ucalyptus/PTI/models/StyleCLIP/models/facial_recognition/__init__.py b/spaces/ucalyptus/PTI/models/StyleCLIP/models/facial_recognition/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/umn-msi/fatchecker/unet/unet_3plus.py b/spaces/umn-msi/fatchecker/unet/unet_3plus.py deleted file mode 100644 index a56c0a2cd343d3e1f5ed2845bc3c939112a39fc6..0000000000000000000000000000000000000000 --- a/spaces/umn-msi/fatchecker/unet/unet_3plus.py +++ /dev/null @@ -1,440 +0,0 @@ -import tensorflow as tf -import tensorflow.keras as k - - -# Model Architecture -def conv_block(x, kernels, kernel_size=(3, 3), strides=(1, 1), padding='same', - is_bn=True, is_relu=True, n=2): - """ Custom function for conv2d: - Apply 3*3 convolutions with BN and relu. - """ - for i in range(1, n + 1): - x = k.layers.Conv2D(filters=kernels, kernel_size=kernel_size, - padding=padding, strides=strides, - kernel_regularizer=tf.keras.regularizers.l2(1e-4), - kernel_initializer=k.initializers.he_normal(seed=5))(x) - if is_bn: - x = k.layers.BatchNormalization()(x) - if is_relu: - x = k.activations.relu(x) - - return x - - -def dotProduct(seg, cls): - B, H, W, N = k.backend.int_shape(seg) - seg = tf.reshape(seg, [-1, H * W, N]) - final = tf.einsum("ijk,ik->ijk", seg, cls) - final = tf.reshape(final, [-1, H, W, N]) - return final - - -""" UNet_3Plus """ -def UNet_3Plus(INPUT_SHAPE, OUTPUT_CHANNELS, pretrained_weights = None): - filters = [64, 128, 256, 512, 1024] - - input_layer = k.layers.Input(shape=INPUT_SHAPE, name="input_layer") # 320*320*3 - - """ Encoder""" - # block 1 - e1 = conv_block(input_layer, filters[0]) # 320*320*64 - - # block 2 - e2 = k.layers.MaxPool2D(pool_size=(2, 2))(e1) # 160*160*64 - e2 = conv_block(e2, filters[1]) # 160*160*128 - - # block 3 - e3 = k.layers.MaxPool2D(pool_size=(2, 2))(e2) # 80*80*128 - e3 = conv_block(e3, filters[2]) # 80*80*256 - - # block 4 - e4 = k.layers.MaxPool2D(pool_size=(2, 2))(e3) # 40*40*256 - e4 = conv_block(e4, filters[3]) # 40*40*512 - - # block 5 - # bottleneck layer - e5 = k.layers.MaxPool2D(pool_size=(2, 2))(e4) # 20*20*512 - e5 = conv_block(e5, filters[4]) # 20*20*1024 - - """ Decoder """ - cat_channels = filters[0] - cat_blocks = len(filters) - upsample_channels = cat_blocks * cat_channels - - """ d4 """ - e1_d4 = k.layers.MaxPool2D(pool_size=(8, 8))(e1) # 320*320*64 --> 40*40*64 - e1_d4 = conv_block(e1_d4, cat_channels, n=1) # 320*320*64 --> 40*40*64 - - e2_d4 = k.layers.MaxPool2D(pool_size=(4, 4))(e2) # 160*160*128 --> 40*40*128 - e2_d4 = conv_block(e2_d4, cat_channels, n=1) # 160*160*128 --> 40*40*64 - - e3_d4 = k.layers.MaxPool2D(pool_size=(2, 2))(e3) # 80*80*256 --> 40*40*256 - e3_d4 = conv_block(e3_d4, cat_channels, n=1) # 80*80*256 --> 40*40*64 - - e4_d4 = conv_block(e4, cat_channels, n=1) 
# 40*40*512 --> 40*40*64 - - e5_d4 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(e5) # 80*80*256 --> 40*40*256 - e5_d4 = conv_block(e5_d4, cat_channels, n=1) # 20*20*1024 --> 20*20*64 - - d4 = k.layers.concatenate([e1_d4, e2_d4, e3_d4, e4_d4, e5_d4]) - d4 = conv_block(d4, upsample_channels, n=1) # 40*40*320 --> 40*40*320 - - """ d3 """ - e1_d3 = k.layers.MaxPool2D(pool_size=(4, 4))(e1) # 320*320*64 --> 80*80*64 - e1_d3 = conv_block(e1_d3, cat_channels, n=1) # 80*80*64 --> 80*80*64 - - e2_d3 = k.layers.MaxPool2D(pool_size=(2, 2))(e2) # 160*160*256 --> 80*80*256 - e2_d3 = conv_block(e2_d3, cat_channels, n=1) # 80*80*256 --> 80*80*64 - - e3_d3 = conv_block(e3, cat_channels, n=1) # 80*80*512 --> 80*80*64 - - e4_d3 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d4) # 40*40*320 --> 80*80*320 - e4_d3 = conv_block(e4_d3, cat_channels, n=1) # 80*80*320 --> 80*80*64 - - e5_d3 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(e5) # 20*20*320 --> 80*80*320 - e5_d3 = conv_block(e5_d3, cat_channels, n=1) # 80*80*320 --> 80*80*64 - - d3 = k.layers.concatenate([e1_d3, e2_d3, e3_d3, e4_d3, e5_d3]) - d3 = conv_block(d3, upsample_channels, n=1) # 80*80*320 --> 80*80*320 - - """ d2 """ - e1_d2 = k.layers.MaxPool2D(pool_size=(2, 2))(e1) # 320*320*64 --> 160*160*64 - e1_d2 = conv_block(e1_d2, cat_channels, n=1) # 160*160*64 --> 160*160*64 - - e2_d2 = conv_block(e2, cat_channels, n=1) # 160*160*256 --> 160*160*64 - - d3_d2 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d3) # 80*80*320 --> 160*160*320 - d3_d2 = conv_block(d3_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64 - - d4_d2 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d4) # 40*40*320 --> 160*160*320 - d4_d2 = conv_block(d4_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64 - - e5_d2 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(e5) # 20*20*320 --> 160*160*320 - e5_d2 = conv_block(e5_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64 - - d2 = k.layers.concatenate([e1_d2, e2_d2, d3_d2, d4_d2, e5_d2]) - d2 = conv_block(d2, upsample_channels, n=1) # 160*160*320 --> 160*160*320 - - """ d1 """ - e1_d1 = conv_block(e1, cat_channels, n=1) # 320*320*64 --> 320*320*64 - - d2_d1 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d2) # 160*160*320 --> 320*320*320 - d2_d1 = conv_block(d2_d1, cat_channels, n=1) # 160*160*320 --> 160*160*64 - - d3_d1 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d3) # 80*80*320 --> 320*320*320 - d3_d1 = conv_block(d3_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64 - - d4_d1 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(d4) # 40*40*320 --> 320*320*320 - d4_d1 = conv_block(d4_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64 - - e5_d1 = k.layers.UpSampling2D(size=(16, 16), interpolation='bilinear')(e5) # 20*20*320 --> 320*320*320 - e5_d1 = conv_block(e5_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64 - - d1 = k.layers.concatenate([e1_d1, d2_d1, d3_d1, d4_d1, e5_d1, ]) - d1 = conv_block(d1, upsample_channels, n=1) # 320*320*320 --> 320*320*320 - - # last layer does not have batchnorm and relu - d = conv_block(d1, OUTPUT_CHANNELS, n=1, is_bn=False, is_relu=False) - - if OUTPUT_CHANNELS == 1: - output = k.activations.sigmoid(d) - else: - output = k.activations.softmax(d) - - model = tf.keras.Model(inputs=input_layer, outputs=output, name='UNet_3Plus') - if(pretrained_weights): - model.load_weights(pretrained_weights) - - return model - - -""" UNet_3Plus with Deep Supervison""" -def 
UNet_3Plus_DeepSup(INPUT_SHAPE, OUTPUT_CHANNELS, pretrained_weights = None): - filters = [64, 128, 256, 512, 1024] - - input_layer = k.layers.Input(shape=INPUT_SHAPE, name="input_layer") # 320*320*3 - - """ Encoder""" - # block 1 - e1 = conv_block(input_layer, filters[0]) # 320*320*64 - - # block 2 - e2 = k.layers.MaxPool2D(pool_size=(2, 2))(e1) # 160*160*64 - e2 = conv_block(e2, filters[1]) # 160*160*128 - - # block 3 - e3 = k.layers.MaxPool2D(pool_size=(2, 2))(e2) # 80*80*128 - e3 = conv_block(e3, filters[2]) # 80*80*256 - - # block 4 - e4 = k.layers.MaxPool2D(pool_size=(2, 2))(e3) # 40*40*256 - e4 = conv_block(e4, filters[3]) # 40*40*512 - - # block 5 - # bottleneck layer - e5 = k.layers.MaxPool2D(pool_size=(2, 2))(e4) # 20*20*512 - e5 = conv_block(e5, filters[4]) # 20*20*1024 - - """ Decoder """ - cat_channels = filters[0] - cat_blocks = len(filters) - upsample_channels = cat_blocks * cat_channels - - """ d4 """ - e1_d4 = k.layers.MaxPool2D(pool_size=(8, 8))(e1) # 320*320*64 --> 40*40*64 - e1_d4 = conv_block(e1_d4, cat_channels, n=1) # 320*320*64 --> 40*40*64 - - e2_d4 = k.layers.MaxPool2D(pool_size=(4, 4))(e2) # 160*160*128 --> 40*40*128 - e2_d4 = conv_block(e2_d4, cat_channels, n=1) # 160*160*128 --> 40*40*64 - - e3_d4 = k.layers.MaxPool2D(pool_size=(2, 2))(e3) # 80*80*256 --> 40*40*256 - e3_d4 = conv_block(e3_d4, cat_channels, n=1) # 80*80*256 --> 40*40*64 - - e4_d4 = conv_block(e4, cat_channels, n=1) # 40*40*512 --> 40*40*64 - - e5_d4 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(e5) # 80*80*256 --> 40*40*256 - e5_d4 = conv_block(e5_d4, cat_channels, n=1) # 20*20*1024 --> 20*20*64 - - d4 = k.layers.concatenate([e1_d4, e2_d4, e3_d4, e4_d4, e5_d4]) - d4 = conv_block(d4, upsample_channels, n=1) # 40*40*320 --> 40*40*320 - - """ d3 """ - e1_d3 = k.layers.MaxPool2D(pool_size=(4, 4))(e1) # 320*320*64 --> 80*80*64 - e1_d3 = conv_block(e1_d3, cat_channels, n=1) # 80*80*64 --> 80*80*64 - - e2_d3 = k.layers.MaxPool2D(pool_size=(2, 2))(e2) # 160*160*256 --> 80*80*256 - e2_d3 = conv_block(e2_d3, cat_channels, n=1) # 80*80*256 --> 80*80*64 - - e3_d3 = conv_block(e3, cat_channels, n=1) # 80*80*512 --> 80*80*64 - - e4_d3 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d4) # 40*40*320 --> 80*80*320 - e4_d3 = conv_block(e4_d3, cat_channels, n=1) # 80*80*320 --> 80*80*64 - - e5_d3 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(e5) # 20*20*320 --> 80*80*320 - e5_d3 = conv_block(e5_d3, cat_channels, n=1) # 80*80*320 --> 80*80*64 - - d3 = k.layers.concatenate([e1_d3, e2_d3, e3_d3, e4_d3, e5_d3]) - d3 = conv_block(d3, upsample_channels, n=1) # 80*80*320 --> 80*80*320 - - """ d2 """ - e1_d2 = k.layers.MaxPool2D(pool_size=(2, 2))(e1) # 320*320*64 --> 160*160*64 - e1_d2 = conv_block(e1_d2, cat_channels, n=1) # 160*160*64 --> 160*160*64 - - e2_d2 = conv_block(e2, cat_channels, n=1) # 160*160*256 --> 160*160*64 - - d3_d2 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d3) # 80*80*320 --> 160*160*320 - d3_d2 = conv_block(d3_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64 - - d4_d2 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d4) # 40*40*320 --> 160*160*320 - d4_d2 = conv_block(d4_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64 - - e5_d2 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(e5) # 20*20*320 --> 160*160*320 - e5_d2 = conv_block(e5_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64 - - d2 = k.layers.concatenate([e1_d2, e2_d2, d3_d2, d4_d2, e5_d2]) - d2 = conv_block(d2, upsample_channels, n=1) # 
160*160*320 --> 160*160*320 - - """ d1 """ - e1_d1 = conv_block(e1, cat_channels, n=1) # 320*320*64 --> 320*320*64 - - d2_d1 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d2) # 160*160*320 --> 320*320*320 - d2_d1 = conv_block(d2_d1, cat_channels, n=1) # 160*160*320 --> 160*160*64 - - d3_d1 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d3) # 80*80*320 --> 320*320*320 - d3_d1 = conv_block(d3_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64 - - d4_d1 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(d4) # 40*40*320 --> 320*320*320 - d4_d1 = conv_block(d4_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64 - - e5_d1 = k.layers.UpSampling2D(size=(16, 16), interpolation='bilinear')(e5) # 20*20*320 --> 320*320*320 - e5_d1 = conv_block(e5_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64 - - d1 = k.layers.concatenate([e1_d1, d2_d1, d3_d1, d4_d1, e5_d1, ]) - d1 = conv_block(d1, upsample_channels, n=1) # 320*320*320 --> 320*320*320 - - """ Deep Supervision Part""" - # last layer does not have batchnorm and relu - d1 = conv_block(d1, OUTPUT_CHANNELS, n=1, is_bn=False, is_relu=False) - d2 = conv_block(d2, OUTPUT_CHANNELS, n=1, is_bn=False, is_relu=False) - d3 = conv_block(d3, OUTPUT_CHANNELS, n=1, is_bn=False, is_relu=False) - d4 = conv_block(d4, OUTPUT_CHANNELS, n=1, is_bn=False, is_relu=False) - e5 = conv_block(e5, OUTPUT_CHANNELS, n=1, is_bn=False, is_relu=False) - - # d1 = no need for upsampling - d2 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d2) - d3 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d3) - d4 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(d4) - e5 = k.layers.UpSampling2D(size=(16, 16), interpolation='bilinear')(e5) - - if OUTPUT_CHANNELS == 1: - d1 = k.activations.sigmoid(d1) - d2 = k.activations.sigmoid(d2) - d3 = k.activations.sigmoid(d3) - d4 = k.activations.sigmoid(d4) - e5 = k.activations.sigmoid(e5) - else: - d1 = k.activations.softmax(d1) - d2 = k.activations.softmax(d2) - d3 = k.activations.softmax(d3) - d4 = k.activations.softmax(d4) - e5 = k.activations.softmax(e5) - - model = tf.keras.Model(inputs=input_layer, outputs=[d1, d2, d3, d4, e5], name='UNet_3Plus_DeepSup') - - if(pretrained_weights): - model.load_weights(pretrained_weights) - - return model - - -""" UNet_3Plus with Deep Supervison and Classification Guided Module""" -def UNet_3Plus_DeepSup_CGM(INPUT_SHAPE, OUTPUT_CHANNELS, pretrained_weights = None): - filters = [64, 128, 256, 512, 1024] - - input_layer = k.layers.Input(shape=INPUT_SHAPE, name="input_layer") # 320*320*3 - - """ Encoder""" - # block 1 - e1 = conv_block(input_layer, filters[0]) # 320*320*64 - - # block 2 - e2 = k.layers.MaxPool2D(pool_size=(2, 2))(e1) # 160*160*64 - e2 = conv_block(e2, filters[1]) # 160*160*128 - - # block 3 - e3 = k.layers.MaxPool2D(pool_size=(2, 2))(e2) # 80*80*128 - e3 = conv_block(e3, filters[2]) # 80*80*256 - - # block 4 - e4 = k.layers.MaxPool2D(pool_size=(2, 2))(e3) # 40*40*256 - e4 = conv_block(e4, filters[3]) # 40*40*512 - - # block 5, bottleneck layer - e5 = k.layers.MaxPool2D(pool_size=(2, 2))(e4) # 20*20*512 - e5 = conv_block(e5, filters[4]) # 20*20*1024 - - """ Classification Guided Module. 
Part 1""" - cls = k.layers.Dropout(rate=0.5)(e5) - cls = k.layers.Conv2D(2, kernel_size=(1, 1), padding="same", strides=(1, 1))(cls) - cls = k.layers.GlobalMaxPooling2D()(cls) - cls = k.activations.sigmoid(cls) - cls = tf.argmax(cls, axis=-1) - cls = cls[..., tf.newaxis] - cls = tf.cast(cls, dtype=tf.float32, ) - - """ Decoder """ - cat_channels = filters[0] - cat_blocks = len(filters) - upsample_channels = cat_blocks * cat_channels - - """ d4 """ - e1_d4 = k.layers.MaxPool2D(pool_size=(8, 8))(e1) # 320*320*64 --> 40*40*64 - e1_d4 = conv_block(e1_d4, cat_channels, n=1) # 320*320*64 --> 40*40*64 - - e2_d4 = k.layers.MaxPool2D(pool_size=(4, 4))(e2) # 160*160*128 --> 40*40*128 - e2_d4 = conv_block(e2_d4, cat_channels, n=1) # 160*160*128 --> 40*40*64 - - e3_d4 = k.layers.MaxPool2D(pool_size=(2, 2))(e3) # 80*80*256 --> 40*40*256 - e3_d4 = conv_block(e3_d4, cat_channels, n=1) # 80*80*256 --> 40*40*64 - - e4_d4 = conv_block(e4, cat_channels, n=1) # 40*40*512 --> 40*40*64 - - e5_d4 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(e5) # 80*80*256 --> 40*40*256 - e5_d4 = conv_block(e5_d4, cat_channels, n=1) # 20*20*1024 --> 20*20*64 - - d4 = k.layers.concatenate([e1_d4, e2_d4, e3_d4, e4_d4, e5_d4]) - d4 = conv_block(d4, upsample_channels, n=1) # 40*40*320 --> 40*40*320 - - """ d3 """ - e1_d3 = k.layers.MaxPool2D(pool_size=(4, 4))(e1) # 320*320*64 --> 80*80*64 - e1_d3 = conv_block(e1_d3, cat_channels, n=1) # 80*80*64 --> 80*80*64 - - e2_d3 = k.layers.MaxPool2D(pool_size=(2, 2))(e2) # 160*160*256 --> 80*80*256 - e2_d3 = conv_block(e2_d3, cat_channels, n=1) # 80*80*256 --> 80*80*64 - - e3_d3 = conv_block(e3, cat_channels, n=1) # 80*80*512 --> 80*80*64 - - e4_d3 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d4) # 40*40*320 --> 80*80*320 - e4_d3 = conv_block(e4_d3, cat_channels, n=1) # 80*80*320 --> 80*80*64 - - e5_d3 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(e5) # 20*20*320 --> 80*80*320 - e5_d3 = conv_block(e5_d3, cat_channels, n=1) # 80*80*320 --> 80*80*64 - - d3 = k.layers.concatenate([e1_d3, e2_d3, e3_d3, e4_d3, e5_d3]) - d3 = conv_block(d3, upsample_channels, n=1) # 80*80*320 --> 80*80*320 - - """ d2 """ - e1_d2 = k.layers.MaxPool2D(pool_size=(2, 2))(e1) # 320*320*64 --> 160*160*64 - e1_d2 = conv_block(e1_d2, cat_channels, n=1) # 160*160*64 --> 160*160*64 - - e2_d2 = conv_block(e2, cat_channels, n=1) # 160*160*256 --> 160*160*64 - - d3_d2 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d3) # 80*80*320 --> 160*160*320 - d3_d2 = conv_block(d3_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64 - - d4_d2 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d4) # 40*40*320 --> 160*160*320 - d4_d2 = conv_block(d4_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64 - - e5_d2 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(e5) # 20*20*320 --> 160*160*320 - e5_d2 = conv_block(e5_d2, cat_channels, n=1) # 160*160*320 --> 160*160*64 - - d2 = k.layers.concatenate([e1_d2, e2_d2, d3_d2, d4_d2, e5_d2]) - d2 = conv_block(d2, upsample_channels, n=1) # 160*160*320 --> 160*160*320 - - """ d1 """ - e1_d1 = conv_block(e1, cat_channels, n=1) # 320*320*64 --> 320*320*64 - - d2_d1 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d2) # 160*160*320 --> 320*320*320 - d2_d1 = conv_block(d2_d1, cat_channels, n=1) # 160*160*320 --> 160*160*64 - - d3_d1 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d3) # 80*80*320 --> 320*320*320 - d3_d1 = conv_block(d3_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64 - - 
d4_d1 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(d4) # 40*40*320 --> 320*320*320 - d4_d1 = conv_block(d4_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64 - - e5_d1 = k.layers.UpSampling2D(size=(16, 16), interpolation='bilinear')(e5) # 20*20*320 --> 320*320*320 - e5_d1 = conv_block(e5_d1, cat_channels, n=1) # 320*320*320 --> 320*320*64 - - d1 = k.layers.concatenate([e1_d1, d2_d1, d3_d1, d4_d1, e5_d1, ]) - d1 = conv_block(d1, upsample_channels, n=1) # 320*320*320 --> 320*320*320 - - """ Deep Supervision Part""" - # last layer does not have batchnorm and relu - d1 = conv_block(d1, OUTPUT_CHANNELS, n=1, is_bn=False, is_relu=False) - d2 = conv_block(d2, OUTPUT_CHANNELS, n=1, is_bn=False, is_relu=False) - d3 = conv_block(d3, OUTPUT_CHANNELS, n=1, is_bn=False, is_relu=False) - d4 = conv_block(d4, OUTPUT_CHANNELS, n=1, is_bn=False, is_relu=False) - e5 = conv_block(e5, OUTPUT_CHANNELS, n=1, is_bn=False, is_relu=False) - - # d1 = no need for upsampling - d2 = k.layers.UpSampling2D(size=(2, 2), interpolation='bilinear')(d2) - d3 = k.layers.UpSampling2D(size=(4, 4), interpolation='bilinear')(d3) - d4 = k.layers.UpSampling2D(size=(8, 8), interpolation='bilinear')(d4) - e5 = k.layers.UpSampling2D(size=(16, 16), interpolation='bilinear')(e5) - - """ Classification Guided Module. Part 2""" - d1 = dotProduct(d1, cls) - d2 = dotProduct(d2, cls) - d3 = dotProduct(d3, cls) - d4 = dotProduct(d4, cls) - e5 = dotProduct(e5, cls) - - if OUTPUT_CHANNELS == 1: - d1 = k.activations.sigmoid(d1) - d2 = k.activations.sigmoid(d2) - d3 = k.activations.sigmoid(d3) - d4 = k.activations.sigmoid(d4) - e5 = k.activations.sigmoid(e5) - else: - d1 = k.activations.softmax(d1) - d2 = k.activations.softmax(d2) - d3 = k.activations.softmax(d3) - d4 = k.activations.softmax(d4) - e5 = k.activations.softmax(e5) - - model = tf.keras.Model(inputs=input_layer, outputs=[d1, d2, d3, d4, e5], name='UNet_3Plus_DeepSup_CGM') - if(pretrained_weights): - model.load_weights(pretrained_weights) - - return model \ No newline at end of file diff --git a/spaces/usecodenaija/x-spaces-web-ui/app.py b/spaces/usecodenaija/x-spaces-web-ui/app.py deleted file mode 100644 index 078cf7bdee2e192383dd0026aa72836188d83629..0000000000000000000000000000000000000000 --- a/spaces/usecodenaija/x-spaces-web-ui/app.py +++ /dev/null @@ -1,103 +0,0 @@ -import gradio as gr -import requests -from dotenv import load_dotenv -import os, datetime - -load_dotenv() -API_URL = os.environ.get("API_URL", "http://0.0.0.0:8002") -APP_ENV = os.environ.get("APP_ENV", None) - - -DATA_DIR = "data/temp_audios/" - - -def x_spaces_interface(topic, description, speaker1, gender1, speaker2, gender2, media_type, theme=None): - # Define your payload/data to send to the external API - data = { - "topic": topic, - "description": description, - "speaker1": speaker1, - "gender1": gender1, # Added gender fields - "speaker2": speaker2, - "gender2": gender2, - "video": True if media_type == "Video" else False, - "theme": theme - } - - # Make the API call - response = requests.post(API_URL, json=data) - - timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") - - # Ensure the API call was successful - if response.status_code != 200: - print("Error: Unable to fetch audio from the external API.") - error_msg = f"Error {response.status_code}: {response.text}" - print(error_msg) - return None, None - - if media_type == "Audio": - filename = f"temp_audio_{timestamp}.mp3" - audio_file_path = os.path.join(DATA_DIR, filename) - video_file_path = None # No video - else: - filename 
= f"temp_video_{timestamp}.mp4" - video_file_path = os.path.join(DATA_DIR, filename) - audio_file_path = None # No audio - - with open(audio_file_path if audio_file_path else video_file_path, "wb") as temp_file: - temp_file.write(response.content) - - return audio_file_path, video_file_path - - -video_html = """ -
          [HTML markup lost in extraction; surviving text: "X-Spaces Examples"]
          -""" - - -media_choices = ["Audio", "Video"] if APP_ENV != "production" and APP_ENV is not None else ["Audio"] - - -iface = gr.Interface( - fn=x_spaces_interface, - inputs=[ - - gr.components.Textbox(placeholder="Enter Topic...", label="Topic"), - gr.components.Textbox(placeholder="Enter topic brief/description for context...", lines=5, label="Description"), - gr.components.Textbox(placeholder="Enter Speaker 1 name...", label="First Speaker"), # Use label for section - gr.components.Radio(choices=["MALE", "FEMALE"], label=""), - gr.components.Textbox(placeholder="Enter Speaker 2 name...", label="Second Speaker"), - gr.components.Radio(choices=["MALE", "FEMALE"], label=""), - gr.components.Dropdown(choices=media_choices, label="Media Type"), - gr.components.Dropdown(choices=["News", "Story", "Tutorial"], label="Theme"), - - - - - ], - - outputs=[ - gr.components.Audio(type="filepath", label="Audio output"), - gr.components.Video(label="Video output") - ], - article=video_html, - title="X-spaces", - description="Conversational Audio generation between two AI characters. [Click Here for More Information!](https://huggingface.co/spaces/usecodenaija/x-spaces-web-ui/blob/main/README.md)", - live=False -) -# Run the interface -iface.queue() -iface.launch() diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/nn/autobackend.py b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/nn/autobackend.py deleted file mode 100644 index e277957a7cfc868ee73b3d241ff5fc963f8ff6df..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/nn/autobackend.py +++ /dev/null @@ -1,458 +0,0 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license - -import ast -import contextlib -import json -import platform -import zipfile -from collections import OrderedDict, namedtuple -from pathlib import Path -from urllib.parse import urlparse - -import cv2 -import numpy as np -import torch -import torch.nn as nn -from PIL import Image - -from ultralytics.yolo.utils import LINUX, LOGGER, ROOT, yaml_load -from ultralytics.yolo.utils.checks import check_requirements, check_suffix, check_version, check_yaml -from ultralytics.yolo.utils.downloads import attempt_download_asset, is_url -from ultralytics.yolo.utils.ops import xywh2xyxy - - -def check_class_names(names): - """Check class names. Map imagenet class codes to human-readable names if required. Convert lists to dicts.""" - if isinstance(names, list): # names is a list - names = dict(enumerate(names)) # convert to dict - if isinstance(names, dict): - # Convert 1) string keys to int, i.e. '0' to 0, and non-string values to strings, i.e. True to 'True' - names = {int(k): str(v) for k, v in names.items()} - n = len(names) - if max(names.keys()) >= n: - raise KeyError(f'{n}-class dataset requires class indices 0-{n - 1}, but you have invalid class indices ' - f'{min(names.keys())}-{max(names.keys())} defined in your dataset YAML.') - if isinstance(names[0], str) and names[0].startswith('n0'): # imagenet class codes, i.e. 'n01440764' - map = yaml_load(ROOT / 'datasets/ImageNet.yaml')['map'] # human-readable names - names = {k: map[v] for k, v in names.items()} - return names - - -class AutoBackend(nn.Module): - - def __init__(self, - weights='yolov8n.pt', - device=torch.device('cpu'), - dnn=False, - data=None, - fp16=False, - fuse=True, - verbose=True): - """ - MultiBackend class for python inference on various platforms using Ultralytics YOLO. 
- - Args: - weights (str): The path to the weights file. Default: 'yolov8n.pt' - device (torch.device): The device to run the model on. - dnn (bool): Use OpenCV DNN module for inference if True, defaults to False. - data (str | Path | optional): Additional data.yaml file for class names. - fp16 (bool): If True, use half precision. Default: False - fuse (bool): Whether to fuse the model or not. Default: True - verbose (bool): Whether to run in verbose mode or not. Default: True - - Supported formats and their naming conventions: - | Format | Suffix | - |-----------------------|------------------| - | PyTorch | *.pt | - | TorchScript | *.torchscript | - | ONNX Runtime | *.onnx | - | ONNX OpenCV DNN | *.onnx dnn=True | - | OpenVINO | *.xml | - | CoreML | *.mlmodel | - | TensorRT | *.engine | - | TensorFlow SavedModel | *_saved_model | - | TensorFlow GraphDef | *.pb | - | TensorFlow Lite | *.tflite | - | TensorFlow Edge TPU | *_edgetpu.tflite | - | PaddlePaddle | *_paddle_model | - """ - super().__init__() - w = str(weights[0] if isinstance(weights, list) else weights) - nn_module = isinstance(weights, torch.nn.Module) - pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, ncnn, triton = \ - self._model_type(w) - fp16 &= pt or jit or onnx or engine or nn_module or triton # FP16 - nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH) - stride = 32 # default stride - model, metadata = None, None - cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA - if not (pt or triton or nn_module): - w = attempt_download_asset(w) # download if not local - - # NOTE: special case: in-memory pytorch model - if nn_module: - model = weights.to(device) - model = model.fuse(verbose=verbose) if fuse else model - if hasattr(model, 'kpt_shape'): - kpt_shape = model.kpt_shape # pose-only - stride = max(int(model.stride.max()), 32) # model stride - names = model.module.names if hasattr(model, 'module') else model.names # get class names - model.half() if fp16 else model.float() - self.model = model # explicitly assign for to(), cpu(), cuda(), half() - pt = True - elif pt: # PyTorch - from ultralytics.nn.tasks import attempt_load_weights - model = attempt_load_weights(weights if isinstance(weights, list) else w, - device=device, - inplace=True, - fuse=fuse) - if hasattr(model, 'kpt_shape'): - kpt_shape = model.kpt_shape # pose-only - stride = max(int(model.stride.max()), 32) # model stride - names = model.module.names if hasattr(model, 'module') else model.names # get class names - model.half() if fp16 else model.float() - self.model = model # explicitly assign for to(), cpu(), cuda(), half() - elif jit: # TorchScript - LOGGER.info(f'Loading {w} for TorchScript inference...') - extra_files = {'config.txt': ''} # model metadata - model = torch.jit.load(w, _extra_files=extra_files, map_location=device) - model.half() if fp16 else model.float() - if extra_files['config.txt']: # load metadata dict - metadata = json.loads(extra_files['config.txt'], object_hook=lambda x: dict(x.items())) - elif dnn: # ONNX OpenCV DNN - LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') - check_requirements('opencv-python>=4.5.4') - net = cv2.dnn.readNetFromONNX(w) - elif onnx: # ONNX Runtime - LOGGER.info(f'Loading {w} for ONNX Runtime inference...') - check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) - import onnxruntime - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] - 
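            # ONNX Runtime tries the execution providers in the order given here, so inference runs on
            # the CUDA provider when it is available and falls back to the CPU provider otherwise.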
session = onnxruntime.InferenceSession(w, providers=providers) - output_names = [x.name for x in session.get_outputs()] - metadata = session.get_modelmeta().custom_metadata_map # metadata - elif xml: # OpenVINO - LOGGER.info(f'Loading {w} for OpenVINO inference...') - check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/ - from openvino.runtime import Core, Layout, get_batch # noqa - ie = Core() - w = Path(w) - if not w.is_file(): # if not *.xml - w = next(w.glob('*.xml')) # get *.xml file from *_openvino_model dir - network = ie.read_model(model=str(w), weights=w.with_suffix('.bin')) - if network.get_parameters()[0].get_layout().empty: - network.get_parameters()[0].set_layout(Layout('NCHW')) - batch_dim = get_batch(network) - if batch_dim.is_static: - batch_size = batch_dim.get_length() - executable_network = ie.compile_model(network, device_name='CPU') # device_name="MYRIAD" for NCS2 - metadata = w.parent / 'metadata.yaml' - elif engine: # TensorRT - LOGGER.info(f'Loading {w} for TensorRT inference...') - try: - import tensorrt as trt # noqa https://developer.nvidia.com/nvidia-tensorrt-download - except ImportError: - if LINUX: - check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') - import tensorrt as trt # noqa - check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 - if device.type == 'cpu': - device = torch.device('cuda:0') - Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) - logger = trt.Logger(trt.Logger.INFO) - # Read file - with open(w, 'rb') as f, trt.Runtime(logger) as runtime: - meta_len = int.from_bytes(f.read(4), byteorder='little') # read metadata length - metadata = json.loads(f.read(meta_len).decode('utf-8')) # read metadata - model = runtime.deserialize_cuda_engine(f.read()) # read engine - context = model.create_execution_context() - bindings = OrderedDict() - output_names = [] - fp16 = False # default updated below - dynamic = False - for i in range(model.num_bindings): - name = model.get_binding_name(i) - dtype = trt.nptype(model.get_binding_dtype(i)) - if model.binding_is_input(i): - if -1 in tuple(model.get_binding_shape(i)): # dynamic - dynamic = True - context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2])) - if dtype == np.float16: - fp16 = True - else: # output - output_names.append(name) - shape = tuple(context.get_binding_shape(i)) - im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) - bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) - binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) - batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size - elif coreml: # CoreML - LOGGER.info(f'Loading {w} for CoreML inference...') - import coremltools as ct - model = ct.models.MLModel(w) - metadata = dict(model.user_defined_metadata) - elif saved_model: # TF SavedModel - LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') - import tensorflow as tf - keras = False # assume TF1 saved_model - model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) - metadata = Path(w) / 'metadata.yaml' - elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt - LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') - import tensorflow as tf - - from ultralytics.yolo.engine.exporter import gd_outputs - - def wrap_frozen_graph(gd, inputs, outputs): - """Wrap frozen graphs for deployment.""" - x = 
tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped - ge = x.graph.as_graph_element - return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) - - gd = tf.Graph().as_graph_def() # TF GraphDef - with open(w, 'rb') as f: - gd.ParseFromString(f.read()) - frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd)) - elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python - try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu - from tflite_runtime.interpreter import Interpreter, load_delegate - except ImportError: - import tensorflow as tf - Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate - if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime - LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') - delegate = { - 'Linux': 'libedgetpu.so.1', - 'Darwin': 'libedgetpu.1.dylib', - 'Windows': 'edgetpu.dll'}[platform.system()] - interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) - else: # TFLite - LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') - interpreter = Interpreter(model_path=w) # load TFLite model - interpreter.allocate_tensors() # allocate - input_details = interpreter.get_input_details() # inputs - output_details = interpreter.get_output_details() # outputs - # Load metadata - with contextlib.suppress(zipfile.BadZipFile): - with zipfile.ZipFile(w, 'r') as model: - meta_file = model.namelist()[0] - metadata = ast.literal_eval(model.read(meta_file).decode('utf-8')) - elif tfjs: # TF.js - raise NotImplementedError('YOLOv8 TF.js inference is not currently supported.') - elif paddle: # PaddlePaddle - LOGGER.info(f'Loading {w} for PaddlePaddle inference...') - check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle') - import paddle.inference as pdi # noqa - w = Path(w) - if not w.is_file(): # if not *.pdmodel - w = next(w.rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir - config = pdi.Config(str(w), str(w.with_suffix('.pdiparams'))) - if cuda: - config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0) - predictor = pdi.create_predictor(config) - input_handle = predictor.get_input_handle(predictor.get_input_names()[0]) - output_names = predictor.get_output_names() - metadata = w.parents[1] / 'metadata.yaml' - elif ncnn: # PaddlePaddle - raise NotImplementedError('YOLOv8 NCNN inference is not currently supported.') - elif triton: # NVIDIA Triton Inference Server - LOGGER.info('Triton Inference Server not supported...') - ''' - TODO: - check_requirements('tritonclient[all]') - from utils.triton import TritonRemoteModel - model = TritonRemoteModel(url=w) - nhwc = model.runtime.startswith("tensorflow") - ''' - else: - from ultralytics.yolo.engine.exporter import export_formats - raise TypeError(f"model='{w}' is not a supported model format. " - 'See https://docs.ultralytics.com/modes/predict for help.' 
- f'\n\n{export_formats()}') - - # Load external metadata YAML - if isinstance(metadata, (str, Path)) and Path(metadata).exists(): - metadata = yaml_load(metadata) - if metadata: - for k, v in metadata.items(): - if k in ('stride', 'batch'): - metadata[k] = int(v) - elif k in ('imgsz', 'names', 'kpt_shape') and isinstance(v, str): - metadata[k] = eval(v) - stride = metadata['stride'] - task = metadata['task'] - batch = metadata['batch'] - imgsz = metadata['imgsz'] - names = metadata['names'] - kpt_shape = metadata.get('kpt_shape') - elif not (pt or triton or nn_module): - LOGGER.warning(f"WARNING ⚠️ Metadata not found for 'model={weights}'") - - # Check names - if 'names' not in locals(): # names missing - names = self._apply_default_class_names(data) - names = check_class_names(names) - - self.__dict__.update(locals()) # assign all variables to self - - def forward(self, im, augment=False, visualize=False): - """ - Runs inference on the YOLOv8 MultiBackend model. - - Args: - im (torch.Tensor): The image tensor to perform inference on. - augment (bool): whether to perform data augmentation during inference, defaults to False - visualize (bool): whether to visualize the output predictions, defaults to False - - Returns: - (tuple): Tuple containing the raw output tensor, and processed output for visualization (if visualize=True) - """ - b, ch, h, w = im.shape # batch, channel, height, width - if self.fp16 and im.dtype != torch.float16: - im = im.half() # to FP16 - if self.nhwc: - im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3) - - if self.pt or self.nn_module: # PyTorch - y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im) - elif self.jit: # TorchScript - y = self.model(im) - elif self.dnn: # ONNX OpenCV DNN - im = im.cpu().numpy() # torch to numpy - self.net.setInput(im) - y = self.net.forward() - elif self.onnx: # ONNX Runtime - im = im.cpu().numpy() # torch to numpy - y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im}) - elif self.xml: # OpenVINO - im = im.cpu().numpy() # FP32 - y = list(self.executable_network([im]).values()) - elif self.engine: # TensorRT - if self.dynamic and im.shape != self.bindings['images'].shape: - i = self.model.get_binding_index('images') - self.context.set_binding_shape(i, im.shape) # reshape if dynamic - self.bindings['images'] = self.bindings['images']._replace(shape=im.shape) - for name in self.output_names: - i = self.model.get_binding_index(name) - self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i))) - s = self.bindings['images'].shape - assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" - self.binding_addrs['images'] = int(im.data_ptr()) - self.context.execute_v2(list(self.binding_addrs.values())) - y = [self.bindings[x].data for x in sorted(self.output_names)] - elif self.coreml: # CoreML - im = im[0].cpu().numpy() - im_pil = Image.fromarray((im * 255).astype('uint8')) - # im = im.resize((192, 320), Image.BILINEAR) - y = self.model.predict({'image': im_pil}) # coordinates are xywh normalized - if 'confidence' in y: - box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels - conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) - y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) - elif len(y) == 1: # classification model - y = list(y.values()) - elif len(y) == 2: # segmentation model - y = list(reversed(y.values())) # 
reversed for segmentation models (pred, proto) - elif self.paddle: # PaddlePaddle - im = im.cpu().numpy().astype(np.float32) - self.input_handle.copy_from_cpu(im) - self.predictor.run() - y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names] - elif self.triton: # NVIDIA Triton Inference Server - y = self.model(im) - else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) - im = im.cpu().numpy() - if self.saved_model: # SavedModel - y = self.model(im, training=False) if self.keras else self.model(im) - if not isinstance(y, list): - y = [y] - elif self.pb: # GraphDef - y = self.frozen_func(x=self.tf.constant(im)) - if len(y) == 2 and len(self.names) == 999: # segments and names not defined - ip, ib = (0, 1) if len(y[0].shape) == 4 else (1, 0) # index of protos, boxes - nc = y[ib].shape[1] - y[ip].shape[3] - 4 # y = (1, 160, 160, 32), (1, 116, 8400) - self.names = {i: f'class{i}' for i in range(nc)} - else: # Lite or Edge TPU - input = self.input_details[0] - int8 = input['dtype'] == np.int8 # is TFLite quantized int8 model - if int8: - scale, zero_point = input['quantization'] - im = (im / scale + zero_point).astype(np.int8) # de-scale - self.interpreter.set_tensor(input['index'], im) - self.interpreter.invoke() - y = [] - for output in self.output_details: - x = self.interpreter.get_tensor(output['index']) - if int8: - scale, zero_point = output['quantization'] - x = (x.astype(np.float32) - zero_point) * scale # re-scale - y.append(x) - # TF segment fixes: export is reversed vs ONNX export and protos are transposed - if len(y) == 2: # segment with (det, proto) output order reversed - if len(y[1].shape) != 4: - y = list(reversed(y)) # should be y = (1, 116, 8400), (1, 160, 160, 32) - y[1] = np.transpose(y[1], (0, 3, 1, 2)) # should be y = (1, 116, 8400), (1, 32, 160, 160) - y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y] - # y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels - - # for x in y: - # print(type(x), len(x)) if isinstance(x, (list, tuple)) else print(type(x), x.shape) # debug shapes - if isinstance(y, (list, tuple)): - return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y] - else: - return self.from_numpy(y) - - def from_numpy(self, x): - """ - Convert a numpy array to a tensor. - - Args: - x (np.ndarray): The array to be converted. - - Returns: - (torch.Tensor): The converted tensor - """ - return torch.tensor(x).to(self.device) if isinstance(x, np.ndarray) else x - - def warmup(self, imgsz=(1, 3, 640, 640)): - """ - Warm up the model by running one forward pass with a dummy input. 
- - Args: - imgsz (tuple): The shape of the dummy input tensor in the format (batch_size, channels, height, width) - - Returns: - (None): This method runs the forward pass and don't return any value - """ - warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton, self.nn_module - if any(warmup_types) and (self.device.type != 'cpu' or self.triton): - im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input - for _ in range(2 if self.jit else 1): # - self.forward(im) # warmup - - @staticmethod - def _apply_default_class_names(data): - """Applies default class names to an input YAML file or returns numerical class names.""" - with contextlib.suppress(Exception): - return yaml_load(check_yaml(data))['names'] - return {i: f'class{i}' for i in range(999)} # return default if above errors - - @staticmethod - def _model_type(p='path/to/model.pt'): - """ - This function takes a path to a model file and returns the model type - - Args: - p: path to the model file. Defaults to path/to/model.pt - """ - # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx - # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle] - from ultralytics.yolo.engine.exporter import export_formats - sf = list(export_formats().Suffix) # export suffixes - if not is_url(p, check=False) and not isinstance(p, str): - check_suffix(p, sf) # checks - url = urlparse(p) # if url may be Triton inference server - types = [s in Path(p).name for s in sf] - types[8] &= not types[9] # tflite &= not edgetpu - triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc]) - return types + [triton] diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/vit/sam/modules/prompt_predictor.py b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/vit/sam/modules/prompt_predictor.py deleted file mode 100644 index bf89893458532c928568693707b16312f19237f7..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/vit/sam/modules/prompt_predictor.py +++ /dev/null @@ -1,242 +0,0 @@ -# Ultralytics YOLO 🚀, AGPL-3.0 license - -from typing import Optional, Tuple - -import numpy as np -import torch - -from ..autosize import ResizeLongestSide -from .sam import Sam - - -class PromptPredictor: - - def __init__(self, sam_model: Sam) -> None: - """ - Uses SAM to calculate the image embedding for an image, and then - allow repeated, efficient mask prediction given prompts. - - Arguments: - sam_model (Sam): The model to use for mask prediction. - """ - super().__init__() - self.model = sam_model - self.transform = ResizeLongestSide(sam_model.image_encoder.img_size) - self.reset_image() - - def set_image(self, image: np.ndarray, image_format: str = 'RGB') -> None: - """ - Calculates the image embeddings for the provided image, allowing - masks to be predicted with the 'predict' method. - - Arguments: - image (np.ndarray): The image for calculating masks. Expects an - image in HWC uint8 format, with pixel values in [0, 255]. - image_format (str): The color format of the image, in ['RGB', 'BGR']. - """ - assert image_format in {'RGB', 'BGR'}, f"image_format must be in ['RGB', 'BGR'], is {image_format}." 
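        # The model expects a fixed channel order (self.model.image_format); when the caller supplies
        # the other ordering, the negative-stride slice below flips BGR <-> RGB before the
        # longest-side resize and the image-encoder embedding.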
- if image_format != self.model.image_format: - image = image[..., ::-1] - - # Transform the image to the form expected by the model - input_image = self.transform.apply_image(image) - input_image_torch = torch.as_tensor(input_image, device=self.device) - input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :] - - self.set_torch_image(input_image_torch, image.shape[:2]) - - @torch.no_grad() - def set_torch_image(self, transformed_image: torch.Tensor, original_image_size: Tuple[int, ...]) -> None: - """ - Calculates the image embeddings for the provided image, allowing - masks to be predicted with the 'predict' method. Expects the input - image to be already transformed to the format expected by the model. - - Arguments: - transformed_image (torch.Tensor): The input image, with shape - 1x3xHxW, which has been transformed with ResizeLongestSide. - original_image_size (tuple(int, int)): The size of the image - before transformation, in (H, W) format. - """ - if len(transformed_image.shape) != 4 \ - or transformed_image.shape[1] != 3 \ - or max(*transformed_image.shape[2:]) != self.model.image_encoder.img_size: - raise ValueError('set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.') - self.reset_image() - - self.original_size = original_image_size - self.input_size = tuple(transformed_image.shape[-2:]) - input_image = self.model.preprocess(transformed_image) - self.features = self.model.image_encoder(input_image) - self.is_image_set = True - - def predict( - self, - point_coords: Optional[np.ndarray] = None, - point_labels: Optional[np.ndarray] = None, - box: Optional[np.ndarray] = None, - mask_input: Optional[np.ndarray] = None, - multimask_output: bool = True, - return_logits: bool = False, - ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: - """ - Predict masks for the given input prompts, using the currently set image. - - Arguments: - point_coords (np.ndarray, None): A Nx2 array of point prompts to the - model. Each point is in (X,Y) in pixels. - point_labels (np.ndarray, None): A length N array of labels for the - point prompts. 1 indicates a foreground point and 0 indicates a - background point. - box (np.ndarray, None): A length 4 array given a box prompt to the - model, in XYXY format. - mask_input (np.ndarray): A low resolution mask input to the model, typically - coming from a previous prediction iteration. Has form 1xHxW, where - for SAM, H=W=256. - multimask_output (bool): If true, the model will return three masks. - For ambiguous input prompts (such as a single click), this will often - produce better masks than a single prediction. If only a single - mask is needed, the model's predicted quality score can be used - to select the best mask. For non-ambiguous prompts, such as multiple - input prompts, multimask_output=False can give better results. - return_logits (bool): If true, returns un-thresholded masks logits - instead of a binary mask. - - Returns: - (np.ndarray): The output masks in CxHxW format, where C is the - number of masks, and (H, W) is the original image size. - (np.ndarray): An array of length C containing the model's - predictions for the quality of each mask. - (np.ndarray): An array of shape CxHxW, where C is the number - of masks and H=W=256. These low resolution logits can be passed to - a subsequent iteration as mask input. - """ - if not self.is_image_set: - raise RuntimeError('An image must be set with .set_image(...) 
before mask prediction.') - - # Transform input prompts - coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None - if point_coords is not None: - assert (point_labels is not None), 'point_labels must be supplied if point_coords is supplied.' - point_coords = self.transform.apply_coords(point_coords, self.original_size) - coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device) - labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device) - coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :] - if box is not None: - box = self.transform.apply_boxes(box, self.original_size) - box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device) - box_torch = box_torch[None, :] - if mask_input is not None: - mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device) - mask_input_torch = mask_input_torch[None, :, :, :] - - masks, iou_predictions, low_res_masks = self.predict_torch( - coords_torch, - labels_torch, - box_torch, - mask_input_torch, - multimask_output, - return_logits=return_logits, - ) - - masks_np = masks[0].detach().cpu().numpy() - iou_predictions_np = iou_predictions[0].detach().cpu().numpy() - low_res_masks_np = low_res_masks[0].detach().cpu().numpy() - return masks_np, iou_predictions_np, low_res_masks_np - - @torch.no_grad() - def predict_torch( - self, - point_coords: Optional[torch.Tensor], - point_labels: Optional[torch.Tensor], - boxes: Optional[torch.Tensor] = None, - mask_input: Optional[torch.Tensor] = None, - multimask_output: bool = True, - return_logits: bool = False, - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """ - Predict masks for the given input prompts, using the currently set image. - Input prompts are batched torch tensors and are expected to already be - transformed to the input frame using ResizeLongestSide. - - Arguments: - point_coords (torch.Tensor, None): A BxNx2 array of point prompts to the - model. Each point is in (X,Y) in pixels. - point_labels (torch.Tensor, None): A BxN array of labels for the - point prompts. 1 indicates a foreground point and 0 indicates a - background point. - boxes (np.ndarray, None): A Bx4 array given a box prompt to the - model, in XYXY format. - mask_input (np.ndarray): A low resolution mask input to the model, typically - coming from a previous prediction iteration. Has form Bx1xHxW, where - for SAM, H=W=256. Masks returned by a previous iteration of the - predict method do not need further transformation. - multimask_output (bool): If true, the model will return three masks. - For ambiguous input prompts (such as a single click), this will often - produce better masks than a single prediction. If only a single - mask is needed, the model's predicted quality score can be used - to select the best mask. For non-ambiguous prompts, such as multiple - input prompts, multimask_output=False can give better results. - return_logits (bool): If true, returns un-thresholded masks logits - instead of a binary mask. - - Returns: - (torch.Tensor): The output masks in BxCxHxW format, where C is the - number of masks, and (H, W) is the original image size. - (torch.Tensor): An array of shape BxC containing the model's - predictions for the quality of each mask. - (torch.Tensor): An array of shape BxCxHxW, where C is the number - of masks and H=W=256. These low res logits can be passed to - a subsequent iteration as mask input. 
- """ - if not self.is_image_set: - raise RuntimeError('An image must be set with .set_image(...) before mask prediction.') - - points = (point_coords, point_labels) if point_coords is not None else None - # Embed prompts - sparse_embeddings, dense_embeddings = self.model.prompt_encoder( - points=points, - boxes=boxes, - masks=mask_input, - ) - - # Predict masks - low_res_masks, iou_predictions = self.model.mask_decoder( - image_embeddings=self.features, - image_pe=self.model.prompt_encoder.get_dense_pe(), - sparse_prompt_embeddings=sparse_embeddings, - dense_prompt_embeddings=dense_embeddings, - multimask_output=multimask_output, - ) - - # Upscale the masks to the original image resolution - masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size) - - if not return_logits: - masks = masks > self.model.mask_threshold - - return masks, iou_predictions, low_res_masks - - def get_image_embedding(self) -> torch.Tensor: - """ - Returns the image embeddings for the currently set image, with - shape 1xCxHxW, where C is the embedding dimension and (H,W) are - the embedding spatial dimension of SAM (typically C=256, H=W=64). - """ - if not self.is_image_set: - raise RuntimeError('An image must be set with .set_image(...) to generate an embedding.') - assert self.features is not None, 'Features must exist if an image has been set.' - return self.features - - @property - def device(self) -> torch.device: - return self.model.device - - def reset_image(self) -> None: - """Resets the currently set image.""" - self.is_image_set = False - self.features = None - self.orig_h = None - self.orig_w = None - self.input_h = None - self.input_w = None diff --git a/spaces/vialibre/edia_lmodels_en/modules/module_connection.py b/spaces/vialibre/edia_lmodels_en/modules/module_connection.py deleted file mode 100644 index a59de986636ff8fdccf4d85f12065260d2e63a84..0000000000000000000000000000000000000000 --- a/spaces/vialibre/edia_lmodels_en/modules/module_connection.py +++ /dev/null @@ -1,122 +0,0 @@ -from abc import ABC -from modules.module_rankSents import RankSents -from modules.module_crowsPairs import CrowsPairs -from typing import List, Tuple - -class Connector(ABC): - def parse_word( - self, - word: str - ) -> str: - - return word.lower().strip() - - def parse_words( - self, - array_in_string: str - ) -> List[str]: - - words = array_in_string.strip() - if not words: - return [] - - words = [ - self.parse_word(word) - for word in words.split(',') if word.strip() != '' - ] - return words - - def process_error( - self, - err: str - ) -> str: - - if err: - err = "
          " + err + "
          " - return err - -class PhraseBiasExplorerConnector(Connector): - def __init__( - self, - **kwargs - ) -> None: - - language_model = kwargs.get('language_model', None) - lang = kwargs.get('lang', None) - if language_model is None or lang is None: - raise KeyError - - self.phrase_bias_explorer = RankSents( - language_model=language_model, - lang=lang - ) - - def rank_sentence_options( - self, - sent: str, - word_list: str, - banned_word_list: str, - useArticles: bool, - usePrepositions: bool, - useConjunctions: bool - ) -> Tuple: - - sent = " ".join(sent.strip().replace("*"," * ").split()) - - err = self.phrase_bias_explorer.errorChecking(sent) - if err: - return self.process_error(err), "", "" - - word_list = self.parse_words(word_list) - banned_word_list = self.parse_words(banned_word_list) - - all_plls_scores = self.phrase_bias_explorer.rank( - sent, - word_list, - banned_word_list, - useArticles, - usePrepositions, - useConjunctions - ) - - all_plls_scores = self.phrase_bias_explorer.Label.compute(all_plls_scores) - return self.process_error(err), all_plls_scores, "" - -class CrowsPairsExplorerConnector(Connector): - def __init__( - self, - **kwargs - ) -> None: - - language_model = kwargs.get('language_model', None) - if language_model is None: - raise KeyError - - self.crows_pairs_explorer = CrowsPairs( - language_model=language_model - ) - - def compare_sentences( - self, - sent0: str, - sent1: str, - sent2: str, - sent3: str, - sent4: str, - sent5: str - ) -> Tuple: - - sent_list = [sent0, sent1, sent2, sent3, sent4, sent5] - err = self.crows_pairs_explorer.errorChecking( - sent_list - ) - - if err: - return self.process_error(err), "", "" - - all_plls_scores = self.crows_pairs_explorer.rank( - sent_list - ) - - all_plls_scores = self.crows_pairs_explorer.Label.compute(all_plls_scores) - return self.process_error(err), all_plls_scores, "" \ No newline at end of file diff --git a/spaces/vishnu0001/text2mesh/shap_e/models/stf/renderer.py b/spaces/vishnu0001/text2mesh/shap_e/models/stf/renderer.py deleted file mode 100644 index 099de74b21492c28aaadc3dd220b64b01fc6647f..0000000000000000000000000000000000000000 --- a/spaces/vishnu0001/text2mesh/shap_e/models/stf/renderer.py +++ /dev/null @@ -1,507 +0,0 @@ -import warnings -from abc import ABC, abstractmethod -from functools import partial -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union - -import numpy as np -import torch -import torch.nn.functional as F - -from shap_e.models.nn.camera import DifferentiableCamera, DifferentiableProjectiveCamera -from shap_e.models.nn.meta import subdict -from shap_e.models.nn.utils import to_torch -from shap_e.models.query import Query -from shap_e.models.renderer import Renderer, get_camera_from_batch -from shap_e.models.volume import BoundingBoxVolume, Volume -from shap_e.rendering.blender.constants import BASIC_AMBIENT_COLOR, BASIC_DIFFUSE_COLOR -from shap_e.rendering.mc import marching_cubes -from shap_e.rendering.torch_mesh import TorchMesh -from shap_e.rendering.view_data import ProjectiveCamera -from shap_e.util.collections import AttrDict - -from .base import Model - - -class STFRendererBase(ABC): - @abstractmethod - def get_signed_distance( - self, - position: torch.Tensor, - params: Dict[str, torch.Tensor], - options: AttrDict[str, Any], - ) -> torch.Tensor: - pass - - @abstractmethod - def get_texture( - self, - position: torch.Tensor, - params: Dict[str, torch.Tensor], - options: AttrDict[str, Any], - ) -> torch.Tensor: - pass - - -class 
STFRenderer(Renderer, STFRendererBase): - def __init__( - self, - sdf: Model, - tf: Model, - volume: Volume, - grid_size: int, - texture_channels: Sequence[str] = ("R", "G", "B"), - channel_scale: Sequence[float] = (255.0, 255.0, 255.0), - ambient_color: Union[float, Tuple[float]] = BASIC_AMBIENT_COLOR, - diffuse_color: Union[float, Tuple[float]] = BASIC_DIFFUSE_COLOR, - specular_color: Union[float, Tuple[float]] = 0.0, - output_srgb: bool = True, - device: torch.device = torch.device("cuda"), - **kwargs, - ): - super().__init__(**kwargs) - assert isinstance(volume, BoundingBoxVolume), "cannot sample points in unknown volume" - self.sdf = sdf - self.tf = tf - self.volume = volume - self.grid_size = grid_size - self.texture_channels = texture_channels - self.channel_scale = to_torch(channel_scale).to(device) - self.ambient_color = ambient_color - self.diffuse_color = diffuse_color - self.specular_color = specular_color - self.output_srgb = output_srgb - self.device = device - self.to(device) - - def render_views( - self, - batch: Dict, - params: Optional[Dict] = None, - options: Optional[Dict] = None, - ) -> AttrDict: - params = self.update(params) - options = AttrDict() if not options else AttrDict(options) - - sdf_fn = partial(self.sdf.forward_batched, params=subdict(params, "sdf")) - tf_fn = partial(self.tf.forward_batched, params=subdict(params, "tf")) - nerstf_fn = None - - return render_views_from_stf( - batch, - options, - sdf_fn=sdf_fn, - tf_fn=tf_fn, - nerstf_fn=nerstf_fn, - volume=self.volume, - grid_size=self.grid_size, - channel_scale=self.channel_scale, - texture_channels=self.texture_channels, - ambient_color=self.ambient_color, - diffuse_color=self.diffuse_color, - specular_color=self.specular_color, - output_srgb=self.output_srgb, - device=self.device, - ) - - def get_signed_distance( - self, - query: Query, - params: Dict[str, torch.Tensor], - options: AttrDict[str, Any], - ) -> torch.Tensor: - return self.sdf( - query, - params=subdict(params, "sdf"), - options=options, - ).signed_distance - - def get_texture( - self, - query: Query, - params: Dict[str, torch.Tensor], - options: AttrDict[str, Any], - ) -> torch.Tensor: - return self.tf( - query, - params=subdict(params, "tf"), - options=options, - ).channels - - -def render_views_from_stf( - batch: Dict, - options: AttrDict[str, Any], - *, - sdf_fn: Optional[Callable], - tf_fn: Optional[Callable], - nerstf_fn: Optional[Callable], - volume: BoundingBoxVolume, - grid_size: int, - channel_scale: torch.Tensor, - texture_channels: Sequence[str] = ("R", "G", "B"), - ambient_color: Union[float, Tuple[float]] = 0.0, - diffuse_color: Union[float, Tuple[float]] = 1.0, - specular_color: Union[float, Tuple[float]] = 0.2, - output_srgb: bool = False, - device: torch.device = torch.device("cuda"), -) -> AttrDict: - """ - :param batch: contains either ["poses", "camera"], or ["cameras"]. Can - optionally contain any of ["height", "width", "query_batch_size"] - :param options: controls checkpointing, caching, and rendering - :param sdf_fn: returns [batch_size, query_batch_size, n_output] where - n_output >= 1. 
- :param tf_fn: returns [batch_size, query_batch_size, n_channels] - :param volume: AABB volume - :param grid_size: SDF sampling resolution - :param texture_channels: what texture to predict - :param channel_scale: how each channel is scaled - :return: at least - channels: [batch_size, len(cameras), height, width, 3] - transmittance: [batch_size, len(cameras), height, width, 1] - aux_losses: AttrDict[str, torch.Tensor] - """ - camera, batch_size, inner_shape = get_camera_from_batch(batch) - inner_batch_size = int(np.prod(inner_shape)) - assert camera.width == camera.height, "only square views are supported" - assert camera.x_fov == camera.y_fov, "only square views are supported" - assert isinstance(camera, DifferentiableProjectiveCamera) - - device = camera.origin.device - device_type = device.type - - TO_CACHE = ["fields", "raw_meshes", "raw_signed_distance", "raw_density", "mesh_mask", "meshes"] - if options.cache is not None and all(key in options.cache for key in TO_CACHE): - fields = options.cache.fields - raw_meshes = options.cache.raw_meshes - raw_signed_distance = options.cache.raw_signed_distance - raw_density = options.cache.raw_density - mesh_mask = options.cache.mesh_mask - else: - query_batch_size = batch.get("query_batch_size", batch.get("ray_batch_size", 4096)) - query_points = volume_query_points(volume, grid_size) - fn = nerstf_fn if sdf_fn is None else sdf_fn - sdf_out = fn( - query=Query(position=query_points[None].repeat(batch_size, 1, 1)), - query_batch_size=query_batch_size, - options=options, - ) - raw_signed_distance = sdf_out.signed_distance - raw_density = None - if "density" in sdf_out: - raw_density = sdf_out.density - with torch.autocast(device_type, enabled=False): - fields = sdf_out.signed_distance.float() - raw_signed_distance = sdf_out.signed_distance - assert ( - len(fields.shape) == 3 and fields.shape[-1] == 1 - ), f"expected [meta_batch x inner_batch] SDF results, but got {fields.shape}" - fields = fields.reshape(batch_size, *([grid_size] * 3)) - - # Force a negative border around the SDFs to close off all the models. - full_grid = torch.zeros( - batch_size, - grid_size + 2, - grid_size + 2, - grid_size + 2, - device=fields.device, - dtype=fields.dtype, - ) - full_grid.fill_(-1.0) - full_grid[:, 1:-1, 1:-1, 1:-1] = fields - fields = full_grid - - raw_meshes = [] - mesh_mask = [] - for field in fields: - raw_mesh = marching_cubes(field, volume.bbox_min, volume.bbox_max - volume.bbox_min) - if len(raw_mesh.faces) == 0: - # DDP deadlocks when there are unused parameters on some ranks - # and not others, so we make sure the field is a dependency in - # the graph regardless of empty meshes. - vertex_dependency = field.mean() - raw_mesh = TorchMesh( - verts=torch.zeros(3, 3, device=device) + vertex_dependency, - faces=torch.tensor([[0, 1, 2]], dtype=torch.long, device=device), - ) - # Make sure we only feed back zero gradients to the field - # by masking out the final renderings of this mesh. 
- mesh_mask.append(False) - else: - mesh_mask.append(True) - raw_meshes.append(raw_mesh) - mesh_mask = torch.tensor(mesh_mask, device=device) - - max_vertices = max(len(m.verts) for m in raw_meshes) - - fn = nerstf_fn if tf_fn is None else tf_fn - tf_out = fn( - query=Query( - position=torch.stack( - [m.verts[torch.arange(0, max_vertices) % len(m.verts)] for m in raw_meshes], - dim=0, - ) - ), - query_batch_size=query_batch_size, - options=options, - ) - - if "cache" in options: - options.cache.fields = fields - options.cache.raw_meshes = raw_meshes - options.cache.raw_signed_distance = raw_signed_distance - options.cache.raw_density = raw_density - options.cache.mesh_mask = mesh_mask - - if output_srgb: - tf_out.channels = _convert_srgb_to_linear(tf_out.channels) - - # Make sure the raw meshes have colors. - with torch.autocast(device_type, enabled=False): - textures = tf_out.channels.float() - assert len(textures.shape) == 3 and textures.shape[-1] == len( - texture_channels - ), f"expected [meta_batch x inner_batch x texture_channels] field results, but got {textures.shape}" - for m, texture in zip(raw_meshes, textures): - texture = texture[: len(m.verts)] - m.vertex_channels = {name: ch for name, ch in zip(texture_channels, texture.unbind(-1))} - - args = dict( - options=options, - texture_channels=texture_channels, - ambient_color=ambient_color, - diffuse_color=diffuse_color, - specular_color=specular_color, - camera=camera, - batch_size=batch_size, - inner_batch_size=inner_batch_size, - inner_shape=inner_shape, - raw_meshes=raw_meshes, - tf_out=tf_out, - ) - - try: - out = _render_with_pytorch3d(**args) - except ModuleNotFoundError as exc: - warnings.warn(f"exception rendering with PyTorch3D: {exc}") - warnings.warn( - "falling back on native PyTorch renderer, which does not support full gradients" - ) - out = _render_with_raycast(**args) - - # Apply mask to prevent gradients for empty meshes. - reshaped_mask = mesh_mask.view([-1] + [1] * (len(out.channels.shape) - 1)) - out.channels = torch.where(reshaped_mask, out.channels, torch.zeros_like(out.channels)) - out.transmittance = torch.where( - reshaped_mask, out.transmittance, torch.ones_like(out.transmittance) - ) - - if output_srgb: - out.channels = _convert_linear_to_srgb(out.channels) - out.channels = out.channels * (1 - out.transmittance) * channel_scale.view(-1) - - # This might be useful information to have downstream - out.raw_meshes = raw_meshes - out.fields = fields - out.mesh_mask = mesh_mask - out.raw_signed_distance = raw_signed_distance - out.aux_losses = AttrDict(cross_entropy=cross_entropy_sdf_loss(fields)) - if raw_density is not None: - out.raw_density = raw_density - - return out - - -def _render_with_pytorch3d( - options: AttrDict, - texture_channels: Sequence[str], - ambient_color: Union[float, Tuple[float]], - diffuse_color: Union[float, Tuple[float]], - specular_color: Union[float, Tuple[float]], - camera: DifferentiableCamera, - batch_size: int, - inner_shape: Sequence[int], - inner_batch_size: int, - raw_meshes: List[TorchMesh], - tf_out: AttrDict, -): - _ = tf_out - - # Lazy import because pytorch3d is installed lazily. 
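    # If pytorch3d is not installed, the ModuleNotFoundError raised by this import is caught in
    # render_views_from_stf, which warns and falls back to _render_with_raycast (no full gradients).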
- from shap_e.rendering.pytorch3d_util import ( - blender_uniform_lights, - convert_cameras_torch, - convert_meshes, - render_images, - ) - - n_channels = len(texture_channels) - device = camera.origin.device - device_type = device.type - - with torch.autocast(device_type, enabled=False): - meshes = convert_meshes(raw_meshes) - - lights = blender_uniform_lights( - batch_size, - device, - ambient_color=ambient_color, - diffuse_color=diffuse_color, - specular_color=specular_color, - ) - - # Separate camera intrinsics for each view, so that we can - # create a new camera for each batch of views. - cam_shape = [batch_size, inner_batch_size, -1] - position = camera.origin.reshape(cam_shape) - x = camera.x.reshape(cam_shape) - y = camera.y.reshape(cam_shape) - z = camera.z.reshape(cam_shape) - - results = [] - for i in range(inner_batch_size): - sub_cams = convert_cameras_torch( - position[:, i], x[:, i], y[:, i], z[:, i], fov=camera.x_fov - ) - imgs = render_images( - camera.width, - meshes, - sub_cams, - lights, - use_checkpoint=options.checkpoint_render, - **options.get("render_options", {}), - ) - results.append(imgs) - views = torch.stack(results, dim=1) - views = views.view(batch_size, *inner_shape, camera.height, camera.width, n_channels + 1) - - out = AttrDict( - channels=views[..., :-1], # [batch_size, *inner_shape, height, width, n_channels] - transmittance=1 - views[..., -1:], # [batch_size, *inner_shape, height, width, 1] - meshes=meshes, - ) - - return out - - -def _render_with_raycast( - options: AttrDict, - texture_channels: Sequence[str], - ambient_color: Union[float, Tuple[float]], - diffuse_color: Union[float, Tuple[float]], - specular_color: Union[float, Tuple[float]], - camera: DifferentiableCamera, - batch_size: int, - inner_shape: Sequence[int], - inner_batch_size: int, - raw_meshes: List[TorchMesh], - tf_out: AttrDict, -): - assert np.mean(np.array(specular_color)) == 0 - - from shap_e.rendering.raycast.render import render_diffuse_mesh - from shap_e.rendering.raycast.types import TriMesh as TorchTriMesh - - device = camera.origin.device - device_type = device.type - - cam_shape = [batch_size, inner_batch_size, -1] - origin = camera.origin.reshape(cam_shape) - x = camera.x.reshape(cam_shape) - y = camera.y.reshape(cam_shape) - z = camera.z.reshape(cam_shape) - - with torch.autocast(device_type, enabled=False): - all_meshes = [] - for i, mesh in enumerate(raw_meshes): - all_meshes.append( - TorchTriMesh( - faces=mesh.faces.long(), - vertices=mesh.verts.float(), - vertex_colors=tf_out.channels[i, : len(mesh.verts)].float(), - ) - ) - all_images = [] - for i, mesh in enumerate(all_meshes): - for j in range(inner_batch_size): - all_images.append( - render_diffuse_mesh( - camera=ProjectiveCamera( - origin=origin[i, j].detach().cpu().numpy(), - x=x[i, j].detach().cpu().numpy(), - y=y[i, j].detach().cpu().numpy(), - z=z[i, j].detach().cpu().numpy(), - width=camera.width, - height=camera.height, - x_fov=camera.x_fov, - y_fov=camera.y_fov, - ), - mesh=mesh, - diffuse=float(np.array(diffuse_color).mean()), - ambient=float(np.array(ambient_color).mean()), - ray_batch_size=16, # low memory usage - checkpoint=options.checkpoint_render, - ) - ) - - n_channels = len(texture_channels) - views = torch.stack(all_images).view( - batch_size, *inner_shape, camera.height, camera.width, n_channels + 1 - ) - return AttrDict( - channels=views[..., :-1], # [batch_size, *inner_shape, height, width, n_channels] - transmittance=1 - views[..., -1:], # [batch_size, *inner_shape, height, width, 1] - 
meshes=all_meshes, - ) - - -def _convert_srgb_to_linear(u: torch.Tensor) -> torch.Tensor: - return torch.where(u <= 0.04045, u / 12.92, ((u + 0.055) / 1.055) ** 2.4) - - -def _convert_linear_to_srgb(u: torch.Tensor) -> torch.Tensor: - return torch.where(u <= 0.0031308, 12.92 * u, 1.055 * (u ** (1 / 2.4)) - 0.055) - - -def cross_entropy_sdf_loss(fields: torch.Tensor): - logits = F.logsigmoid(fields) - signs = (fields > 0).float() - - losses = [] - for dim in range(1, 4): - n = logits.shape[dim] - for (t_start, t_end, p_start, p_end) in [(0, -1, 1, n), (1, n, 0, -1)]: - targets = slice_fields(signs, dim, t_start, t_end) - preds = slice_fields(logits, dim, p_start, p_end) - losses.append( - F.binary_cross_entropy_with_logits(preds, targets, reduction="none") - .flatten(1) - .mean() - ) - return torch.stack(losses, dim=-1).sum() - - -def slice_fields(fields: torch.Tensor, dim: int, start: int, end: int): - if dim == 1: - return fields[:, start:end] - elif dim == 2: - return fields[:, :, start:end] - elif dim == 3: - return fields[:, :, :, start:end] - else: - raise ValueError(f"cannot slice dimension {dim}") - - -def volume_query_points( - volume: Volume, - grid_size: int, -): - assert isinstance(volume, BoundingBoxVolume) - indices = torch.arange(grid_size**3, device=volume.bbox_min.device) - zs = indices % grid_size - ys = torch.div(indices, grid_size, rounding_mode="trunc") % grid_size - xs = torch.div(indices, grid_size**2, rounding_mode="trunc") % grid_size - combined = torch.stack([xs, ys, zs], dim=1) - return (combined.float() / (grid_size - 1)) * ( - volume.bbox_max - volume.bbox_min - ) + volume.bbox_min diff --git a/spaces/vivym/image-matting-app/ppmatting/models/layers/__init__.py b/spaces/vivym/image-matting-app/ppmatting/models/layers/__init__.py deleted file mode 100644 index 31eba2cacd64eddaf0734495b5a992a86b7bad37..0000000000000000000000000000000000000000 --- a/spaces/vivym/image-matting-app/ppmatting/models/layers/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
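# The single re-export below defines the package's public surface; downstream code would then use,
# e.g. (illustrative only):
#   from ppmatting.models.layers import GuidedCxtAtten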
- -from .gca_module import GuidedCxtAtten diff --git a/spaces/vonbarnekowa/stable-diffusion/ldm/modules/image_degradation/bsrgan_light.py b/spaces/vonbarnekowa/stable-diffusion/ldm/modules/image_degradation/bsrgan_light.py deleted file mode 100644 index 808c7f882cb75e2ba2340d5b55881d11927351f0..0000000000000000000000000000000000000000 --- a/spaces/vonbarnekowa/stable-diffusion/ldm/modules/image_degradation/bsrgan_light.py +++ /dev/null @@ -1,651 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np -import cv2 -import torch - -from functools import partial -import random -from scipy import ndimage -import scipy -import scipy.stats as ss -from scipy.interpolate import interp2d -from scipy.linalg import orth -import albumentations - -import ldm.modules.image_degradation.utils_image as util - -""" -# -------------------------------------------- -# Super-Resolution -# -------------------------------------------- -# -# Kai Zhang (cskaizhang@gmail.com) -# https://github.com/cszn -# From 2019/03--2021/08 -# -------------------------------------------- -""" - -def modcrop_np(img, sf): - ''' - Args: - img: numpy image, WxH or WxHxC - sf: scale factor - Return: - cropped image - ''' - w, h = img.shape[:2] - im = np.copy(img) - return im[:w - w % sf, :h - h % sf, ...] - - -""" -# -------------------------------------------- -# anisotropic Gaussian kernels -# -------------------------------------------- -""" - - -def analytic_kernel(k): - """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" - k_size = k.shape[0] - # Calculate the big kernels size - big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) - # Loop over the small kernel to fill the big one - for r in range(k_size): - for c in range(k_size): - big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k - # Crop the edges of the big kernel to ignore very small values and increase run time of SR - crop = k_size // 2 - cropped_big_k = big_k[crop:-crop, crop:-crop] - # Normalize to 1 - return cropped_big_k / cropped_big_k.sum() - - -def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): - """ generate an anisotropic Gaussian kernel - Args: - ksize : e.g., 15, kernel size - theta : [0, pi], rotation angle range - l1 : [0.1,50], scaling of eigenvalues - l2 : [0.1,l1], scaling of eigenvalues - If l1 = l2, will get an isotropic Gaussian kernel. 
- Returns: - k : kernel - """ - - v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) - V = np.array([[v[0], v[1]], [v[1], -v[0]]]) - D = np.array([[l1, 0], [0, l2]]) - Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) - k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) - - return k - - -def gm_blur_kernel(mean, cov, size=15): - center = size / 2.0 + 0.5 - k = np.zeros([size, size]) - for y in range(size): - for x in range(size): - cy = y - center + 1 - cx = x - center + 1 - k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) - - k = k / np.sum(k) - return k - - -def shift_pixel(x, sf, upper_left=True): - """shift pixel for super-resolution with different scale factors - Args: - x: WxHxC or WxH - sf: scale factor - upper_left: shift direction - """ - h, w = x.shape[:2] - shift = (sf - 1) * 0.5 - xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) - if upper_left: - x1 = xv + shift - y1 = yv + shift - else: - x1 = xv - shift - y1 = yv - shift - - x1 = np.clip(x1, 0, w - 1) - y1 = np.clip(y1, 0, h - 1) - - if x.ndim == 2: - x = interp2d(xv, yv, x)(x1, y1) - if x.ndim == 3: - for i in range(x.shape[-1]): - x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) - - return x - - -def blur(x, k): - ''' - x: image, NxcxHxW - k: kernel, Nx1xhxw - ''' - n, c = x.shape[:2] - p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 - x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') - k = k.repeat(1, c, 1, 1) - k = k.view(-1, 1, k.shape[2], k.shape[3]) - x = x.view(1, -1, x.shape[2], x.shape[3]) - x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) - x = x.view(n, c, x.shape[2], x.shape[3]) - - return x - - -def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): - """" - # modified version of https://github.com/assafshocher/BlindSR_dataset_generator - # Kai Zhang - # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var - # max_var = 2.5 * sf - """ - # Set random eigen-vals (lambdas) and angle (theta) for COV matrix - lambda_1 = min_var + np.random.rand() * (max_var - min_var) - lambda_2 = min_var + np.random.rand() * (max_var - min_var) - theta = np.random.rand() * np.pi # random theta - noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 - - # Set COV matrix using Lambdas and Theta - LAMBDA = np.diag([lambda_1, lambda_2]) - Q = np.array([[np.cos(theta), -np.sin(theta)], - [np.sin(theta), np.cos(theta)]]) - SIGMA = Q @ LAMBDA @ Q.T - INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] - - # Set expectation position (shifting kernel for aligned image) - MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) - MU = MU[None, None, :, None] - - # Create meshgrid for Gaussian - [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) - Z = np.stack([X, Y], 2)[:, :, :, None] - - # Calcualte Gaussian for every pixel of the kernel - ZZ = Z - MU - ZZ_t = ZZ.transpose(0, 1, 3, 2) - raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) - - # shift the kernel so it will be centered - # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) - - # Normalize the kernel and return - # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) - kernel = raw_kernel / np.sum(raw_kernel) - return kernel - - -def fspecial_gaussian(hsize, sigma): - hsize = [hsize, hsize] - siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] - std = 
sigma - [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) - arg = -(x * x + y * y) / (2 * std * std) - h = np.exp(arg) - h[h < scipy.finfo(float).eps * h.max()] = 0 - sumh = h.sum() - if sumh != 0: - h = h / sumh - return h - - -def fspecial_laplacian(alpha): - alpha = max([0, min([alpha, 1])]) - h1 = alpha / (alpha + 1) - h2 = (1 - alpha) / (alpha + 1) - h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] - h = np.array(h) - return h - - -def fspecial(filter_type, *args, **kwargs): - ''' - python code from: - https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py - ''' - if filter_type == 'gaussian': - return fspecial_gaussian(*args, **kwargs) - if filter_type == 'laplacian': - return fspecial_laplacian(*args, **kwargs) - - -""" -# -------------------------------------------- -# degradation models -# -------------------------------------------- -""" - - -def bicubic_degradation(x, sf=3): - ''' - Args: - x: HxWxC image, [0, 1] - sf: down-scale factor - Return: - bicubicly downsampled LR image - ''' - x = util.imresize_np(x, scale=1 / sf) - return x - - -def srmd_degradation(x, k, sf=3): - ''' blur + bicubic downsampling - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2018learning, - title={Learning a single convolutional super-resolution network for multiple degradations}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={3262--3271}, - year={2018} - } - ''' - x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' - x = bicubic_degradation(x, sf=sf) - return x - - -def dpsr_degradation(x, k, sf=3): - ''' bicubic downsampling + blur - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2019deep, - title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={1671--1681}, - year={2019} - } - ''' - x = bicubic_degradation(x, sf=sf) - x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - return x - - -def classical_degradation(x, k, sf=3): - ''' blur + downsampling - Args: - x: HxWxC image, [0, 1]/[0, 255] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - ''' - x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) - st = 0 - return x[st::sf, st::sf, ...] - - -def add_sharpening(img, weight=0.5, radius=50, threshold=10): - """USM sharpening. borrowed from real-ESRGAN - Input image: I; Blurry image: B. - 1. K = I + weight * (I - B) - 2. Mask = 1 if abs(I - B) > threshold, else: 0 - 3. Blur mask: - 4. Out = Mask * K + (1 - Mask) * I - Args: - img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. - weight (float): Sharp weight. Default: 1. - radius (float): Kernel size of Gaussian blur. Default: 50. 
- threshold (int): - """ - if radius % 2 == 0: - radius += 1 - blur = cv2.GaussianBlur(img, (radius, radius), 0) - residual = img - blur - mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') - soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) - - K = img + weight * residual - K = np.clip(K, 0, 1) - return soft_mask * K + (1 - soft_mask) * img - - -def add_blur(img, sf=4): - wd2 = 4.0 + sf - wd = 2.0 + 0.2 * sf - - wd2 = wd2/4 - wd = wd/4 - - if random.random() < 0.5: - l1 = wd2 * random.random() - l2 = wd2 * random.random() - k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) - else: - k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random()) - img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror') - - return img - - -def add_resize(img, sf=4): - rnum = np.random.rand() - if rnum > 0.8: # up - sf1 = random.uniform(1, 2) - elif rnum < 0.7: # down - sf1 = random.uniform(0.5 / sf, 1) - else: - sf1 = 1.0 - img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - return img - - -# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): -# noise_level = random.randint(noise_level1, noise_level2) -# rnum = np.random.rand() -# if rnum > 0.6: # add color Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) -# elif rnum < 0.4: # add grayscale Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) -# else: # add noise -# L = noise_level2 / 255. -# D = np.diag(np.random.rand(3)) -# U = orth(np.random.rand(3, 3)) -# conv = np.dot(np.dot(np.transpose(U), D), U) -# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) -# img = np.clip(img, 0.0, 1.0) -# return img - -def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - rnum = np.random.rand() - if rnum > 0.6: # add color Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: # add grayscale Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: # add noise - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_speckle_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - img = np.clip(img, 0.0, 1.0) - rnum = random.random() - if rnum > 0.6: - img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: - img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_Poisson_noise(img): - img = np.clip((img * 255.0).round(), 0, 255) / 255. 
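# Poisson (shot) noise model: the image is first quantized to 8-bit levels above, then
# scaled by `vals` (drawn log-uniformly from [1e2, 1e4]) so that pixel values act as photon
# counts; Poisson samples are drawn at that scale and divided back, so a larger `vals`
# means more photons and therefore weaker relative noise. The else-branch below adds the
# Poisson residual of the grayscale image to every channel, i.e. luminance-only noise.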
- vals = 10 ** (2 * random.random() + 2.0) # [2, 4] - if random.random() < 0.5: - img = np.random.poisson(img * vals).astype(np.float32) / vals - else: - img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) - img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. - noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray - img += noise_gray[:, :, np.newaxis] - img = np.clip(img, 0.0, 1.0) - return img - - -def add_JPEG_noise(img): - quality_factor = random.randint(80, 95) - img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) - result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) - img = cv2.imdecode(encimg, 1) - img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) - return img - - -def random_crop(lq, hq, sf=4, lq_patchsize=64): - h, w = lq.shape[:2] - rnd_h = random.randint(0, h - lq_patchsize) - rnd_w = random.randint(0, w - lq_patchsize) - lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] - - rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) - hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] - return lq, hq - - -def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - hq = img.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - img = util.imresize_np(img, 1 / 2, True) - img = np.clip(img, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - img = add_blur(img, sf=sf) - - elif i == 1: - img = add_blur(img, sf=sf) - - elif i == 2: - a, b = img.shape[1], img.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') - img = img[0::sf, 0::sf, ...] 
# nearest downsampling - img = np.clip(img, 0.0, 1.0) - - elif i == 3: - # downsample3 - img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - img = add_JPEG_noise(img) - - elif i == 6: - # add processed camera sensor noise - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf_ori, lq_patchsize) - - return img, hq - - -# todo no isp_model? -def degradation_bsrgan_variant(image, sf=4, isp_model=None, up=False): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - image = util.uint2single(image) - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = image.shape[:2] - image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = image.shape[:2] - - hq = image.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - image = util.imresize_np(image, 1 / 2, True) - image = np.clip(image, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - image = add_blur(image, sf=sf) - - # elif i == 1: - # image = add_blur(image, sf=sf) - - if i == 0: - pass - - elif i == 2: - a, b = image.shape[1], image.shape[0] - # downsample2 - if random.random() < 0.8: - sf1 = random.uniform(1, 2 * sf) - image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - image = ndimage.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') - image = image[0::sf, 0::sf, ...] 
# nearest downsampling - - image = np.clip(image, 0.0, 1.0) - - elif i == 3: - # downsample3 - image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - image = np.clip(image, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - image = add_JPEG_noise(image) - # - # elif i == 6: - # # add processed camera sensor noise - # if random.random() < isp_prob and isp_model is not None: - # with torch.no_grad(): - # img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - image = add_JPEG_noise(image) - image = util.single2uint(image) - if up: - image = cv2.resize(image, (w1, h1), interpolation=cv2.INTER_CUBIC) # todo: random, as above? want to condition on it then - example = {"image": image} - return example - - - - -if __name__ == '__main__': - print("hey") - img = util.imread_uint('utils/test.png', 3) - img = img[:448, :448] - h = img.shape[0] // 4 - print("resizing to", h) - sf = 4 - deg_fn = partial(degradation_bsrgan_variant, sf=sf) - for i in range(20): - print(i) - img_hq = img - img_lq = deg_fn(img)["image"] - img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq) - print(img_lq) - img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"] - print(img_lq.shape) - print("bicubic", img_lq_bicubic.shape) - print(img_hq.shape) - lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), - (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) - util.imsave(img_concat, str(i) + '.png') diff --git a/spaces/vslasor/VLS10-VideoAudioSummarizer-GR/app.py b/spaces/vslasor/VLS10-VideoAudioSummarizer-GR/app.py deleted file mode 100644 index d686b1db6e57ac3e1e62c1889236760e79c6667f..0000000000000000000000000000000000000000 --- a/spaces/vslasor/VLS10-VideoAudioSummarizer-GR/app.py +++ /dev/null @@ -1,30 +0,0 @@ -import gradio as gr -from summarize import Summarizer - -interface = gr.Interface(fn = Summarizer, - inputs = [gr.inputs.Textbox(lines=2, - placeholder="Enter your link...", - label='YouTube Video Link'), - gr.inputs.Radio(["mT5", "BART"], type="value", label='Model')], - outputs = [gr.outputs.Textbox( - label="Summary")], - - title = "Video Summary Generator", - examples = [ - ['https://www.youtube.com/watch?v=cdiD-9MMpb0', 'BART'], - ['https://www.youtube.com/watch?v=p3lsYlod5OU&t=5202s', 'BART'], - ['https://www.youtube.com/watch?v=Gfr50f6ZBvo&t=1493s', 'BART'], - ['https://www.youtube.com/watch?v=4oDZyOf6CW4&t=3149s', 'BART'], - ['https://www.youtube.com/watch?v=lvh3g7eszVQ&t=291s', 'mT5'], - ['https://www.youtube.com/watch?v=OaeYUm06in0', 'mT5'], - ['https://www.youtube.com/watch?v=ZecQ64l-gKM&t=545s', 'mT5'], - ['https://www.youtube.com/watch?v=5zOHSysMmH0&t=5798s', 'mT5'], - ['https://www.youtube.com/watch?v=X0-SXS6zdEQ&t=23s', 'mT5'], - ['https://www.youtube.com/watch?v=gFEE3w7F0ww&t=18s', 'mT5'], - ['https://www.youtube.com/watch?v=Z1KwkpTUbkg&t=30s', 'mT5'], - ['https://www.youtube.com/watch?v=rIpUf-Vy2JA&t=3542s', 'mT5'], - ['https://www.youtube.com/watch?v=bgNzUxyS-kQ&t=3631s', 'mT5'] - ], - enable_queue=True) - -interface.launch(debug=True) \ No newline at end 
of file diff --git a/spaces/vumichien/Generate_human_motion/VQ-Trans/options/option_vq.py b/spaces/vumichien/Generate_human_motion/VQ-Trans/options/option_vq.py deleted file mode 100644 index 08a53ff1270facc10ab44ec0647e673ed1336d0d..0000000000000000000000000000000000000000 --- a/spaces/vumichien/Generate_human_motion/VQ-Trans/options/option_vq.py +++ /dev/null @@ -1,61 +0,0 @@ -import argparse - -def get_args_parser(): - parser = argparse.ArgumentParser(description='Optimal Transport AutoEncoder training for AIST', - add_help=True, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - - ## dataloader - parser.add_argument('--dataname', type=str, default='kit', help='dataset directory') - parser.add_argument('--batch-size', default=128, type=int, help='batch size') - parser.add_argument('--window-size', type=int, default=64, help='training motion length') - - ## optimization - parser.add_argument('--total-iter', default=200000, type=int, help='number of total iterations to run') - parser.add_argument('--warm-up-iter', default=1000, type=int, help='number of total iterations for warmup') - parser.add_argument('--lr', default=2e-4, type=float, help='max learning rate') - parser.add_argument('--lr-scheduler', default=[50000, 400000], nargs="+", type=int, help="learning rate schedule (iterations)") - parser.add_argument('--gamma', default=0.05, type=float, help="learning rate decay") - - parser.add_argument('--weight-decay', default=0.0, type=float, help='weight decay') - parser.add_argument("--commit", type=float, default=0.02, help="hyper-parameter for the commitment loss") - parser.add_argument('--loss-vel', type=float, default=0.1, help='hyper-parameter for the velocity loss') - parser.add_argument('--recons-loss', type=str, default='l2', help='reconstruction loss') - - ## vqvae arch - parser.add_argument("--code-dim", type=int, default=512, help="embedding dimension") - parser.add_argument("--nb-code", type=int, default=512, help="nb of embedding") - parser.add_argument("--mu", type=float, default=0.99, help="exponential moving average to update the codebook") - parser.add_argument("--down-t", type=int, default=2, help="downsampling rate") - parser.add_argument("--stride-t", type=int, default=2, help="stride size") - parser.add_argument("--width", type=int, default=512, help="width of the network") - parser.add_argument("--depth", type=int, default=3, help="depth of the network") - parser.add_argument("--dilation-growth-rate", type=int, default=3, help="dilation growth rate") - parser.add_argument("--output-emb-width", type=int, default=512, help="output embedding width") - parser.add_argument('--vq-act', type=str, default='relu', choices = ['relu', 'silu', 'gelu'], help='dataset directory') - parser.add_argument('--vq-norm', type=str, default=None, help='dataset directory') - - ## quantizer - parser.add_argument("--quantizer", type=str, default='ema_reset', choices = ['ema', 'orig', 'ema_reset', 'reset'], help="eps for optimal transport") - parser.add_argument('--beta', type=float, default=1.0, help='commitment loss in standard VQ') - - ## resume - parser.add_argument("--resume-pth", type=str, default=None, help='resume pth for VQ') - parser.add_argument("--resume-gpt", type=str, default=None, help='resume pth for GPT') - - - ## output directory - parser.add_argument('--out-dir', type=str, default='output_vqfinal/', help='output directory') - parser.add_argument('--results-dir', type=str, default='visual_results/', help='output directory') - parser.add_argument('--visual-name', 
type=str, default='baseline', help='output directory') - parser.add_argument('--exp-name', type=str, default='exp_debug', help='name of the experiment, will create a file inside out-dir') - ## other - parser.add_argument('--print-iter', default=200, type=int, help='print frequency') - parser.add_argument('--eval-iter', default=1000, type=int, help='evaluation frequency') - parser.add_argument('--seed', default=123, type=int, help='seed for initializing training.') - - parser.add_argument('--vis-gt', action='store_true', help='whether visualize GT motions') - parser.add_argument('--nb-vis', default=20, type=int, help='nb of visualizations') - - - return parser.parse_args() \ No newline at end of file diff --git a/spaces/wanghuoto/gogoai/src/pages/api/blob.ts b/spaces/wanghuoto/gogoai/src/pages/api/blob.ts deleted file mode 100644 index fecd48031916b2284b8958892196e0a1ad420421..0000000000000000000000000000000000000000 --- a/spaces/wanghuoto/gogoai/src/pages/api/blob.ts +++ /dev/null @@ -1,40 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { Readable } from 'node:stream' -import { fetch } from '@/lib/isomorphic' - -const API_DOMAIN = 'https://www.bing.com' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { bcid } = req.query - - const { headers, body } = await fetch(`${API_DOMAIN}/images/blob?bcid=${bcid}`, - { - method: 'GET', - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referrer-Policy": "origin-when-cross-origin", - }, - }, - ) - - res.writeHead(200, { - 'Content-Length': headers.get('content-length')!, - 'Content-Type': headers.get('content-type')!, - }) - // @ts-ignore - return Readable.fromWeb(body!).pipe(res) - } catch (e) { - console.log('Error', e) - return res.json({ - result: { - value: 'UploadFailed', - message: `${e}` - } - }) - } -} diff --git a/spaces/wffcyrus/MetaGPT-v1/metagpt/roles/product_manager.py b/spaces/wffcyrus/MetaGPT-v1/metagpt/roles/product_manager.py deleted file mode 100644 index b42e9bb294484d57aa38a01e23ef98104483a5c6..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/metagpt/roles/product_manager.py +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/11 14:43 -@Author : alexanderwu -@File : product_manager.py -""" -from metagpt.actions import BossRequirement, WritePRD -from metagpt.roles import Role - - -class ProductManager(Role): - def __init__(self, name="Alice", profile="Product Manager", goal="Efficiently create a successful product", - constraints=""): - super().__init__(name, profile, goal, constraints) - self._init_actions([WritePRD]) - self._watch([BossRequirement]) diff --git a/spaces/wuhuik/bingo/src/components/learn-more.tsx b/spaces/wuhuik/bingo/src/components/learn-more.tsx deleted file mode 100644 index a64459ee7900a612292e117a6bda96ee9260990f..0000000000000000000000000000000000000000 --- a/spaces/wuhuik/bingo/src/components/learn-more.tsx +++ /dev/null @@ -1,39 +0,0 @@ -import React from 'react' -import { SourceAttribution } from '@/lib/bots/bing/types' - -export interface LearnMoreProps { - sourceAttributions?: SourceAttribution[] -} - -export function LearnMore({ sourceAttributions }: LearnMoreProps) { - if (!sourceAttributions?.length) { - return null - } - - return ( -
          -
          了解详细信息:
          -
          -
          - {sourceAttributions.map((attribution, index) => { - const { providerDisplayName, seeMoreUrl } = attribution - const { host } = new URL(seeMoreUrl) - return ( - - {index + 1}. {host} - - ) - })} -
          -
          -
          - ) -} diff --git a/spaces/xc9/VITS-Umamusume-voice-synthesizer/ONNXVITS_infer.py b/spaces/xc9/VITS-Umamusume-voice-synthesizer/ONNXVITS_infer.py deleted file mode 100644 index af04e614c8f1ac43faf363b1a9f6bfd667fbde21..0000000000000000000000000000000000000000 --- a/spaces/xc9/VITS-Umamusume-voice-synthesizer/ONNXVITS_infer.py +++ /dev/null @@ -1,201 +0,0 @@ -import torch -import commons -import models - -import math -from torch import nn -from torch.nn import functional as F - -import modules -import attentions - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emotion_embedding = emotion_embedding - - if self.n_vocab != 0: - self.emb = nn.Embedding(n_vocab, hidden_channels) - if emotion_embedding: - self.emo_proj = nn.Linear(1024, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, emotion_embedding=None): - if self.n_vocab != 0: - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - if emotion_embedding is not None: - print("emotion added") - x = x + self.emo_proj(emotion_embedding.unsqueeze(1)) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class SynthesizerTrn(models.SynthesizerTrn): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - 
upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - emotion_embedding=False, - ONNX_dir="./ONNX_net/", - **kwargs): - - super().__init__( - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=n_speakers, - gin_channels=gin_channels, - use_sdp=use_sdp, - **kwargs - ) - self.ONNX_dir = ONNX_dir - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - emotion_embedding) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None, - emotion_embedding=None): - from ONNXVITS_utils import runonnx - with torch.no_grad(): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, emotion_embedding) - - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - # logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - logw = runonnx(f"{self.ONNX_dir}dp.onnx", x=x.numpy(), x_mask=x_mask.numpy(), g=g.numpy()) - logw = torch.from_numpy(logw[0]) - - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - - # z = self.flow(z_p, y_mask, g=g, reverse=True) - z = runonnx(f"{self.ONNX_dir}flow.onnx", z_p=z_p.numpy(), y_mask=y_mask.numpy(), g=g.numpy()) - z = torch.from_numpy(z[0]) - - # o = self.dec((z * y_mask)[:,:,:max_len], g=g) - o = runonnx(f"{self.ONNX_dir}dec.onnx", z_in=(z * y_mask)[:, :, :max_len].numpy(), g=g.numpy()) - o = torch.from_numpy(o[0]) - - return o, attn, y_mask, (z, z_p, m_p, logs_p) \ No newline at end of file diff --git a/spaces/xdecoder/Instruct-X-Decoder/xdecoder/language/misc.py b/spaces/xdecoder/Instruct-X-Decoder/xdecoder/language/misc.py deleted file mode 100644 index faf172fbb8a90ed49ca0de9a9ca1d875f2f96215..0000000000000000000000000000000000000000 --- a/spaces/xdecoder/Instruct-X-Decoder/xdecoder/language/misc.py +++ /dev/null @@ -1,64 +0,0 @@ -import random - -import nltk -nltk.data.path.append('/mnt/data/nltk_data') -import numpy as np - -from utils.constants import IMAGENET_DEFAULT_TEMPLATES - - -def get_tag(tokenized, tags): - if not isinstance(tags, (list, tuple)): - tags = [tags] - ret = [] - for (word, pos) in nltk.pos_tag(tokenized): - for tag in tags: - if pos == tag: - ret.append(word) - return ret - -def get_noun_phrase(tokenized): - # Taken from Su Nam Kim Paper... - grammar = r""" - NBAR: - {*} # Nouns and Adjectives, terminated with Nouns - - NP: - {} - {} # Above, connected with in/of/etc... 
- """ - chunker = nltk.RegexpParser(grammar) - - chunked = chunker.parse(nltk.pos_tag(tokenized)) - continuous_chunk = [] - current_chunk = [] - - for subtree in chunked: - if isinstance(subtree, nltk.Tree): - current_chunk.append(' '.join([token for token, pos in subtree.leaves()])) - elif current_chunk: - named_entity = ' '.join(current_chunk) - if named_entity not in continuous_chunk: - continuous_chunk.append(named_entity) - current_chunk = [] - else: - continue - - return continuous_chunk - -def text_noun_with_prompt_all(text, phrase_prob=0.0, append_text=True): - tokenized = nltk.word_tokenize(text) - - if random.random() >= phrase_prob: - nouns = get_tag(tokenized, ['NN', 'NNS', 'NNP']) - else: - nouns = get_noun_phrase(tokenized) - - - prompt_texts = [np.random.choice(IMAGENET_DEFAULT_TEMPLATES).format(noun) for noun in nouns] - - if append_text: - prompt_texts += [text] - nouns += [text] - - return prompt_texts, nouns \ No newline at end of file diff --git a/spaces/ygangang/CodeFormer/README.md b/spaces/ygangang/CodeFormer/README.md deleted file mode 100644 index 6fafbe6f03ca8588a58a159d4ab39fe2256c9d88..0000000000000000000000000000000000000000 --- a/spaces/ygangang/CodeFormer/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: CodeFormer -emoji: 🐼 -colorFrom: blue -colorTo: green -sdk: gradio -sdk_version: 3.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: sczhou/CodeFormer ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ygangang/VToonify/vtoonify/model/encoder/__init__.py b/spaces/ygangang/VToonify/vtoonify/model/encoder/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/electra/configuration_electra.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/electra/configuration_electra.py deleted file mode 100644 index d8e1de0fc97fa449c4941bc407fd689a7f50be7c..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/electra/configuration_electra.py +++ /dev/null @@ -1,198 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. -# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" ELECTRA model configuration""" - -from collections import OrderedDict -from typing import Mapping - -from ...configuration_utils import PretrainedConfig -from ...onnx import OnnxConfig -from ...utils import logging - - -logger = logging.get_logger(__name__) - -ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "google/electra-small-generator": "https://huggingface.co/google/electra-small-generator/resolve/main/config.json", - "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/config.json", - "google/electra-large-generator": "https://huggingface.co/google/electra-large-generator/resolve/main/config.json", - "google/electra-small-discriminator": ( - "https://huggingface.co/google/electra-small-discriminator/resolve/main/config.json" - ), - "google/electra-base-discriminator": ( - "https://huggingface.co/google/electra-base-discriminator/resolve/main/config.json" - ), - "google/electra-large-discriminator": ( - "https://huggingface.co/google/electra-large-discriminator/resolve/main/config.json" - ), -} - - -class ElectraConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`ElectraModel`] or a [`TFElectraModel`]. It is - used to instantiate a ELECTRA model according to the specified arguments, defining the model architecture. - Instantiating a configuration with the defaults will yield a similar configuration to that of the ELECTRA - [google/electra-small-discriminator](https://huggingface.co/google/electra-small-discriminator) architecture. - - Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the - documentation from [`PretrainedConfig`] for more information. - - - Args: - vocab_size (`int`, *optional*, defaults to 30522): - Vocabulary size of the ELECTRA model. Defines the number of different tokens that can be represented by the - `inputs_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`]. - embedding_size (`int`, *optional*, defaults to 128): - Dimensionality of the encoder layers and the pooler layer. - hidden_size (`int`, *optional*, defaults to 256): - Dimensionality of the encoder layers and the pooler layer. - num_hidden_layers (`int`, *optional*, defaults to 12): - Number of hidden layers in the Transformer encoder. - num_attention_heads (`int`, *optional*, defaults to 4): - Number of attention heads for each attention layer in the Transformer encoder. - intermediate_size (`int`, *optional*, defaults to 1024): - Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. - hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): - The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, - `"relu"`, `"silu"` and `"gelu_new"` are supported. - hidden_dropout_prob (`float`, *optional*, defaults to 0.1): - The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. - attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): - The dropout ratio for the attention probabilities. - max_position_embeddings (`int`, *optional*, defaults to 512): - The maximum sequence length that this model might ever be used with. Typically set this to something large - just in case (e.g., 512 or 1024 or 2048). - type_vocab_size (`int`, *optional*, defaults to 2): - The vocabulary size of the `token_type_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`]. 
- initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - layer_norm_eps (`float`, *optional*, defaults to 1e-12): - The epsilon used by the layer normalization layers. - summary_type (`str`, *optional*, defaults to `"first"`): - Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. - - Has to be one of the following options: - - - `"last"`: Take the last token hidden state (like XLNet). - - `"first"`: Take the first token hidden state (like BERT). - - `"mean"`: Take the mean of all tokens hidden states. - - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2). - - `"attn"`: Not implemented now, use multi-head attention. - summary_use_proj (`bool`, *optional*, defaults to `True`): - Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. - - Whether or not to add a projection after the vector extraction. - summary_activation (`str`, *optional*): - Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. - - Pass `"gelu"` for a gelu activation to the output, any other value will result in no activation. - summary_last_dropout (`float`, *optional*, defaults to 0.0): - Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. - - The dropout ratio to be used after the projection and activation. - position_embedding_type (`str`, *optional*, defaults to `"absolute"`): - Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For - positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to - [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). - For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models - with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). - use_cache (`bool`, *optional*, defaults to `True`): - Whether or not the model should return the last key/values attentions (not used by all models). Only - relevant if `config.is_decoder=True`. - classifier_dropout (`float`, *optional*): - The dropout ratio for the classification head. 
- - Examples: - - ```python - >>> from transformers import ElectraConfig, ElectraModel - - >>> # Initializing a ELECTRA electra-base-uncased style configuration - >>> configuration = ElectraConfig() - - >>> # Initializing a model (with random weights) from the electra-base-uncased style configuration - >>> model = ElectraModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - model_type = "electra" - - def __init__( - self, - vocab_size=30522, - embedding_size=128, - hidden_size=256, - num_hidden_layers=12, - num_attention_heads=4, - intermediate_size=1024, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=2, - initializer_range=0.02, - layer_norm_eps=1e-12, - summary_type="first", - summary_use_proj=True, - summary_activation="gelu", - summary_last_dropout=0.1, - pad_token_id=0, - position_embedding_type="absolute", - use_cache=True, - classifier_dropout=None, - **kwargs, - ): - super().__init__(pad_token_id=pad_token_id, **kwargs) - - self.vocab_size = vocab_size - self.embedding_size = embedding_size - self.hidden_size = hidden_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.intermediate_size = intermediate_size - self.hidden_act = hidden_act - self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob - self.max_position_embeddings = max_position_embeddings - self.type_vocab_size = type_vocab_size - self.initializer_range = initializer_range - self.layer_norm_eps = layer_norm_eps - - self.summary_type = summary_type - self.summary_use_proj = summary_use_proj - self.summary_activation = summary_activation - self.summary_last_dropout = summary_last_dropout - self.position_embedding_type = position_embedding_type - self.use_cache = use_cache - self.classifier_dropout = classifier_dropout - - -class ElectraOnnxConfig(OnnxConfig): - @property - def inputs(self) -> Mapping[str, Mapping[int, str]]: - if self.task == "multiple-choice": - dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} - else: - dynamic_axis = {0: "batch", 1: "sequence"} - return OrderedDict( - [ - ("input_ids", dynamic_axis), - ("attention_mask", dynamic_axis), - ("token_type_ids", dynamic_axis), - ] - ) diff --git a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tests/test_registry.py b/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tests/test_registry.py deleted file mode 100644 index 4e425a6ec44c7c47a5a106bfdf5ce8062c2110c9..0000000000000000000000000000000000000000 --- a/spaces/ynhe/AskAnything/models/grit_src/third_party/CenterNet2/tests/test_registry.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import unittest -import torch - -from detectron2.modeling.meta_arch import GeneralizedRCNN -from detectron2.utils.registry import _convert_target_to_string, locate - - -class A: - class B: - pass - - -class TestLocate(unittest.TestCase): - def _test_obj(self, obj): - name = _convert_target_to_string(obj) - newobj = locate(name) - self.assertIs(obj, newobj) - - def test_basic(self): - self._test_obj(GeneralizedRCNN) - - def test_inside_class(self): - # requires using __qualname__ instead of __name__ - self._test_obj(A.B) - - def test_builtin(self): - self._test_obj(len) - self._test_obj(dict) - - def test_pytorch_optim(self): - # pydoc.locate does not work for it - self._test_obj(torch.optim.SGD) - - def test_failure(self): - with self.assertRaises(ImportError): - locate("asdf") - - def test_compress_target(self): - from detectron2.data.transforms import RandomCrop - - name = _convert_target_to_string(RandomCrop) - # name shouldn't contain 'augmentation_impl' - self.assertEqual(name, "detectron2.data.transforms.RandomCrop") - self.assertIs(RandomCrop, locate(name)) diff --git a/spaces/yo2266911/uma_voice/README.md b/spaces/yo2266911/uma_voice/README.md deleted file mode 100644 index b2d3fa3d658f72a1dfb97ab417b960b99d0dd714..0000000000000000000000000000000000000000 --- a/spaces/yo2266911/uma_voice/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Uma Voice -emoji: 🚀 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.7 -app_file: app.py -pinned: false -duplicated_from: Plachta/uma_voice ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/zhanghaohui/szu-gpt-academic/request_llm/bridge_jittorllms_llama.py b/spaces/zhanghaohui/szu-gpt-academic/request_llm/bridge_jittorllms_llama.py deleted file mode 100644 index 6dfac681aeaa11a780304b9e645637cabd677688..0000000000000000000000000000000000000000 --- a/spaces/zhanghaohui/szu-gpt-academic/request_llm/bridge_jittorllms_llama.py +++ /dev/null @@ -1,178 +0,0 @@ - -from transformers import AutoModel, AutoTokenizer -import time -import threading -import importlib -from toolbox import update_ui, get_conf -from multiprocessing import Process, Pipe - -load_message = "jittorllms尚未加载,加载需要一段时间。注意,请避免混用多种jittor模型,否则可能导致显存溢出而造成卡顿,取决于`config.py`的配置,jittorllms消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……" - -################################################################################# -class GetGLMHandle(Process): - def __init__(self): - super().__init__(daemon=True) - self.parent, self.child = Pipe() - self.jittorllms_model = None - self.info = "" - self.local_history = [] - self.success = True - self.check_dependency() - self.start() - self.threadLock = threading.Lock() - - def check_dependency(self): - try: - import pandas - self.info = "依赖检测通过" - self.success = True - except: - from toolbox import trimmed_format_exc - self.info = r"缺少jittorllms的依赖,如果要使用jittorllms,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I`"+\ - r"和`git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms`两个指令来安装jittorllms的依赖(在项目根目录运行这两个指令)。" +\ - r"警告:安装jittorllms依赖后将完全破坏现有的pytorch环境,建议使用docker环境!" 
+ trimmed_format_exc() - self.success = False - - def ready(self): - return self.jittorllms_model is not None - - def run(self): - # 子进程执行 - # 第一次运行,加载参数 - def validate_path(): - import os, sys - dir_name = os.path.dirname(__file__) - env = os.environ.get("PATH", "") - os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin') - root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..') - os.chdir(root_dir_assume + '/request_llm/jittorllms') - sys.path.append(root_dir_assume + '/request_llm/jittorllms') - validate_path() # validate path so you can run from base directory - - def load_model(): - import types - try: - if self.jittorllms_model is None: - device, = get_conf('LOCAL_MODEL_DEVICE') - from .jittorllms.models import get_model - # availabel_models = ["chatglm", "pangualpha", "llama", "chatrwkv"] - args_dict = {'model': 'llama'} - print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))') - self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict)) - print('done get model') - except: - self.child.send('[Local Message] Call jittorllms fail 不能正常加载jittorllms的参数。') - raise RuntimeError("不能正常加载jittorllms的参数!") - print('load_model') - load_model() - - # 进入任务等待状态 - print('进入任务等待状态') - while True: - # 进入任务等待状态 - kwargs = self.child.recv() - query = kwargs['query'] - history = kwargs['history'] - # 是否重置 - if len(self.local_history) > 0 and len(history)==0: - print('触发重置') - self.jittorllms_model.reset() - self.local_history.append(query) - - print('收到消息,开始请求') - try: - for response in self.jittorllms_model.stream_chat(query, history): - print(response) - self.child.send(response) - except: - from toolbox import trimmed_format_exc - print(trimmed_format_exc()) - self.child.send('[Local Message] Call jittorllms fail.') - # 请求处理结束,开始下一个循环 - self.child.send('[Finish]') - - def stream_chat(self, **kwargs): - # 主进程执行 - self.threadLock.acquire() - self.parent.send(kwargs) - while True: - res = self.parent.recv() - if res != '[Finish]': - yield res - else: - break - self.threadLock.release() - -global llama_glm_handle -llama_glm_handle = None -################################################################################# -def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False): - """ - 多线程方法 - 函数的说明请见 request_llm/bridge_all.py - """ - global llama_glm_handle - if llama_glm_handle is None: - llama_glm_handle = GetGLMHandle() - if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + llama_glm_handle.info - if not llama_glm_handle.success: - error = llama_glm_handle.info - llama_glm_handle = None - raise RuntimeError(error) - - # jittorllms 没有 sys_prompt 接口,因此把prompt加入 history - history_feedin = [] - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) - - watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可 - response = "" - for response in llama_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - print(response) - if len(observe_window) >= 1: observe_window[0] = response - if len(observe_window) >= 2: - if (time.time()-observe_window[1]) > watch_dog_patience: - raise RuntimeError("程序终止。") - return response - - - -def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None): - """ - 单线程方法 - 函数的说明请见 request_llm/bridge_all.py - """ - 
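# Single-threaded path: append the query with an empty reply slot, then refresh the chat UI
# after every partial response streamed back from the local jittorllms worker process.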
chatbot.append((inputs, "")) - - global llama_glm_handle - if llama_glm_handle is None: - llama_glm_handle = GetGLMHandle() - chatbot[-1] = (inputs, load_message + "\n\n" + llama_glm_handle.info) - yield from update_ui(chatbot=chatbot, history=[]) - if not llama_glm_handle.success: - llama_glm_handle = None - return - - if additional_fn is not None: - import core_functional - importlib.reload(core_functional) # hot-reload the prompt - core_functional = core_functional.get_core_functions() - if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # fetch the pre-processing function (if any) - inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"] - - # assemble the chat history - history_feedin = [] - for i in range(len(history)//2): - history_feedin.append([history[2*i], history[2*i+1]] ) - - # start receiving the reply streamed back from jittorllms - response = "[Local Message]: 等待jittorllms响应中 ..." - for response in llama_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']): - chatbot[-1] = (inputs, response) - yield from update_ui(chatbot=chatbot, history=history) - - # finalize the output - if response == "[Local Message]: 等待jittorllms响应中 ...": - response = "[Local Message]: jittorllms响应异常 ..." - history.extend([inputs, response]) - yield from update_ui(chatbot=chatbot, history=history)
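The bridge above streams partial replies from a background worker process over a multiprocessing.Pipe, with the literal string '[Finish]' acting as an end-of-stream sentinel. Below is a minimal, self-contained sketch of that producer/consumer pattern; the worker function is hypothetical and merely imitates a model emitting tokens, it is not the actual jittorllms loader.

# A minimal sketch of the Pipe-based streaming pattern used by GetGLMHandle above.
# The worker below is hypothetical: it only imitates a model emitting tokens.
from multiprocessing import Pipe, Process


def worker(conn):
    # Receive one query, send back a growing partial reply chunk by chunk,
    # then send the '[Finish]' sentinel so the parent stops reading.
    query = conn.recv()
    reply = ""
    for token in ["echo", ": ", query]:
        reply += token
        conn.send(reply)
    conn.send('[Finish]')


if __name__ == '__main__':
    parent, child = Pipe()
    proc = Process(target=worker, args=(child,), daemon=True)
    proc.start()
    parent.send("hi")
    while True:
        chunk = parent.recv()
        if chunk == '[Finish]':
            break
        print(chunk)  # each partial reply, analogous to updating chatbot[-1] above
    proc.join()

Keeping the model in a separate process appears to be a deliberate choice here: it isolates the heavyweight jittorllms runtime from the web server and lets the UI loop consume partial results as they arrive.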